| text (string, 12-1.05M chars) | repo_name (string, 5-86 chars) | path (string, 4-191 chars) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/env python
#
# threat_note v4.0 #
# Developed By: Brian Warehime #
# Defense Point Security (defpoint.com) #
# October 26, 2015 #
#
import argparse
import os.path
from app import app
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--host', default='127.0.0.1', help='Specify the host IP address')
parser.add_argument('-p', '--port', default=8888, type=int, help='Specify port to listen on')
parser.add_argument('-d', '--debug', default=False, help='Run in debug mode', action='store_true')
parser.add_argument('-D', '--database', default='threatnote.db', help='Path and name of SQLite database')
args = parser.parse_args()
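# Example invocation (host/port/path values are hypothetical):
#   python run.py -H 0.0.0.0 -p 9000 -d -D /opt/threat_note/threatnote.db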
if args.database == 'threatnote.db':
path = os.path.join(os.getcwd(), args.database)
else:
path = args.database
print(path)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + path
if not os.path.isfile(path):
from app import db
print('Initializing database')
db.create_all()
app.run(host=args.host, port=args.port, debug=args.debug)
|
alxhrck/threat_note
|
run.py
|
Python
|
apache-2.0
| 1,225
|
[
"Brian"
] |
69b174db7a1d90a999b8a28b9be6c7d06d644e99d8e99ef17e911528ae58d545
|
#!/usr/bin/env python
import os
import sys
import argparse
from xml.dom.minidom import Document
class Tool():
def __init__( self, ap_parser, **kwargs ):
self.ap_parser = ap_parser
self.name = ap_parser.prog
self.id = ap_parser.prog
self.version = kwargs.get('version', None) or str(getattr(ap_parser, 'version', '')) or '0.1'
self.description = kwargs.get('description', None) or ap_parser.description or 'Insert Short Description'
self.blacklisted_parameters = ['--version', '--verbose', '--help']
def parse( self ):
self.doc = Document( )
self.tool = self.create_tool( )
self.create_description( )
self.create_requirements( )
self.create_stdio( )
self.doc.appendChild( self.tool )
self.create_command( )
self.create_inputs( )
self.create_outputs( )
self.create_help( )
self.create_reference()
def convert_to_galaxy( self ):
self.doc.writexml(sys.stdout, indent=" ", addindent=" ", newl='\n', encoding="UTF-8")
def create_tool( self ):
tool = self.doc.createElement("tool")
tool.setAttribute( "id", self.name )
tool.setAttribute( "version", self.version )
tool.setAttribute( "name", self.name )
return tool
def create_description( self ):
description_node = self.doc.createElement("description")
description = self.doc.createTextNode( self.description )
description_node.appendChild( description )
self.tool.appendChild( description_node )
def get_param_name( self, param ):
long_param = self.get_longest_param_name( param )
return long_param.replace('-', '_').strip('_')
def get_longest_param_name( self, param ):
if len( param.option_strings ) == 1:
return param.option_strings[0]
else:
if len( param.option_strings[0] ) > len( param.option_strings[1] ):
return param.option_strings[0]
else:
return param.option_strings[1]
def get_param_type( self, param ):
if type(param) in [argparse._StoreTrueAction, argparse._StoreFalseAction]:
return 'boolean'
elif type(param) == argparse._StoreAction:
if param.choices is not None:
return 'select'
if param.type == int:
return 'integer'
elif param.type == float:
return 'float'
return 'text'
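# Illustrative mappings, using the example parser defined under __main__ below:
# '--true-feature' (store_true) -> 'boolean', '--minReads' (type=int) ->
# 'integer', '--floor' (choices=[...]) -> 'select', '--output' -> 'text'.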
def is_blacklisted( self, param ):
for name in param.option_strings:
if name in self.blacklisted_parameters:
return True
return False
def create_command( self ):
final_command = self.name + '\n'
for param in self.extract_parameters( ):
command = ''
param_name = self.get_param_name( param )
param_type = self.get_param_type( param )
if self.is_blacklisted( param ):
continue
if param_type == 'boolean':
command += '$%s\n' % ( param_name )
else:
if param_type == 'text':
command += "\n#if str($%(param_name)s).strip() != '':\n " % {"param_name": param_name}
command = "%s '${%s}'\n" % (self.get_longest_param_name( param ), param_name)
if param_type == 'text':
command += "#end if\n"
final_command += command
command_node = self.doc.createElement("command")
command_text_node = self.doc.createCDATASection( final_command.strip() )
command_node.appendChild(command_text_node)
self.tool.appendChild(command_node)
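# Sketch of the generated Cheetah command template for the example parser
# below: a store_true flag such as '--true-feature' emits '$true_feature',
# while a text option like '--output' is wrapped in a guard:
#   #if str($output).strip() != '':
#     --output '${output}'
#   #end if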
def create_inputs( self ):
inputs_node = self.doc.createElement("inputs")
for param in self.extract_parameters( ):
if self.is_blacklisted( param ):
continue
inputs_node.appendChild( self.create_param_node( param ) )
self.tool.appendChild(inputs_node)
def extract_parameters( self ):
"""
ToDo: Add some parameter filtering here and react on nested parameters
"""
for parameter in self.ap_parser._actions:
yield parameter
def create_param_node( self, param ):
param_name = self.get_param_name( param )
param_type = self.get_param_type( param )
param_node = self.doc.createElement( "param" )
param_node.setAttribute( "name", param_name )
label = ""
if param.help is not None:
label = param.help
else:
label = "%s parameter" % self.get_longest_param_name( param )
param_node.setAttribute("label", label)
param_node.setAttribute("help", "(%s)" % self.get_longest_param_name( param ))
if param_type is None:
raise "Unrecognized parameter type '%(type)' for parameter '%(name)'" % {"type":param_type, "name":param_name}
param_node.setAttribute("type", param_type)
if param.required:
param_node.setAttribute("optional", str(not param.required))
# check for parameters with restricted values (which will correspond to a "select" in galaxy)
if param_type == 'select':
for choice in param.choices:
option_node = self.doc.createElement( "option" )
option_node.setAttribute( "value", str(choice) )
option_label = self.doc.createTextNode( str(choice) )
option_node.appendChild( option_label )
param_node.appendChild( option_node )
return param_node
if param_type == "text":
# add size attribute... this is the length of a textbox field in Galaxy (it could also be 15x2, for instance)
param_node.setAttribute("size", "20")
if param_type == 'boolean':
if type(param) == argparse._StoreTrueAction:
param_node.setAttribute("truevalue", "%s" % self.get_longest_param_name( param ))
param_node.setAttribute("falsevalue", '')
elif type(param) == argparse._StoreFalseAction:
param_node.setAttribute("falsevalue", "%s" % self.get_longest_param_name( param ))
param_node.setAttribute("truevalue", '')
param_node.setAttribute("checked", str(param.default))
# check for default value
if param.default is not None:
if param_type != "boolean":
param_node.setAttribute("value", str(param.default))
else:
param_node.setAttribute("value", '')
return param_node
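# Rough shape of the XML emitted for the '--floor' select argument defined
# under __main__ below (attribute order may vary):
#   <param name="floor" label="baz help" help="(--floor)" type="select">
#     <option value="dance">dance</option>
#     ...
#   </param>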
def create_outputs( self ):
"""
How to guess the output parameters, usualy they are not of type FILE
whitelist?
"""
outputs_node = self.doc.createElement("outputs")
outputs_node.appendChild( self.create_data_node( ) )
self.tool.appendChild(outputs_node)
def create_data_node( self ):
data_node = self.doc.createElement("data")
data_node.setAttribute("name", 'outfile')
data_node.setAttribute("format", 'data')
data_node.setAttribute("label", '${tool.name} on ${on_string}')
data_node.appendChild( self.create_filter_node() )
data_node.appendChild( self.create_change_format_node() )
return data_node
def create_filter_node( self, data_format = 'EXAMPL'):
"""
<filter>'bam' in outputs</filter>
"""
filter_node = self.doc.createElement("filter")
option_label = self.doc.createTextNode("'%s' in param_out_type" % (data_format))
filter_node.appendChild(option_label)
return filter_node
def create_change_format_node( self, data_formats = ['foo', 'bar'], input_ref = 'infile'):
"""
<change_format>
<when input="secondary_structure" value="true" format="text"/>
</change_format>
"""
change_format_node = self.doc.createElement("change_format")
for data_format in data_formats:
when_node = self.doc.createElement("when")
when_node.setAttribute('input', input_ref)
when_node.setAttribute('value', data_format)
when_node.setAttribute('format', data_format)
change_format_node.appendChild( when_node )
return change_format_node
def create_requirements( self ):
"""
<requirements>
<requirement type="binary">@EXECUTABLE@</requirement>
<requirement type="package" version="1.1.1">TODO</requirement>
</requirements>
"""
requirements_node = self.doc.createElement("requirements")
requirement_node = self.doc.createElement("requirement")
requirement_node.setAttribute("type", "binary")
requirement_text_node = self.doc.createTextNode('@EXECUTABLE@')
requirement_node.appendChild(requirement_text_node)
requirements_node.appendChild(requirement_node)
requirement_node = self.doc.createElement("requirement")
requirement_node.setAttribute("type", "package")
requirement_node.setAttribute("version", "1.1.1")
requirement_text_node = self.doc.createTextNode('TODO')
requirement_node.appendChild(requirement_text_node)
requirements_node.appendChild(requirement_node)
self.tool.appendChild( requirements_node )
def create_reference( self ):
"""
<citations>
<citation type="doi">10.1371/journal.pcbi.1003153</citation>
</citations>
"""
citations_node = self.doc.createElement("citations")
citation_node = self.doc.createElement("citation")
citation_node.setAttribute( "type", "doi" )
citation_text_node = self.doc.createTextNode('10.1371/journal.pcbi.1003153')
citation_node.appendChild(citation_text_node)
citations_node.appendChild( citation_node )
self.tool.appendChild( citations_node )
def create_stdio( self ):
"""
<!-- Anything other than zero is an error -->
<exit_code range="1:" />
<exit_code range=":-1" />
<!-- In case the return code has not been set propery check stderr too -->
<regex match="Error:" />
<regex match="Exception:" />
"""
stdio_node = self.doc.createElement("stdio")
exit_code_node = self.doc.createElement("exit_code")
exit_code_node.setAttribute("range", "1:")
stdio_node.appendChild(exit_code_node)
exit_code_node = self.doc.createElement("exit_code")
exit_code_node.setAttribute("range", ":-1")
stdio_node.appendChild(exit_code_node)
exit_code_node = self.doc.createElement("regex")
exit_code_node.setAttribute("match", "Error:")
stdio_node.appendChild(exit_code_node)
exit_code_node = self.doc.createElement("regex")
exit_code_node.setAttribute("match", "Exception:")
stdio_node.appendChild(exit_code_node)
self.tool.appendChild( stdio_node)
def create_help( self ):
"""
**What it does**
+ some help from the argparse definitions
"""
help_text = '**What it does**\n\n'
help_text += self.ap_parser.description or ' Insert Short Description'
help_text += '\n'
help_text += self.ap_parser.epilog or 'Insert long description with website link'
help_text += '\n'
help_text += self.ap_parser.format_help() or 'insert help instructions'
help_text += '\n'
help_text += self.ap_parser.format_usage()
help_text += '\n'
help_node = self.doc.createElement("help")
help_text_node = self.doc.createCDATASection( help_text )
help_node.appendChild( help_text_node )
self.tool.appendChild(help_node)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process some integers.')
parser.version = 3.4
# Example parser from the platypus code and additional tests
parser.add_argument("-o", "--output", dest="output", help="Output SNP data file", default="AllVariants.vcf")
parser.add_argument("--refFile",dest="refFile", help="Fasta file of reference. Index must be in same directory", required=True)
parser.add_argument("--skipRegionsFile", dest="skipRegionsFile", help = "region as comma-separated list of chr:start-end, or just list of chr, or nothing", default=None)
parser.add_argument("--bufferSize", dest="bufferSize", type=int, help = "Data will be buffered in regions of this size", default=100000, required=False)
parser.add_argument("--minReads", dest="minReads", help="Minimum number of supporting reads required before a variant candidate will be considered.", type=int, default=2)
parser.add_argument("--verbosity", dest="verbosity", help="Level of logging", type=int, default=2)
parser.add_argument("--printAlignments", dest="printAlignments", help="If 1, then alignments of reads to haplotypes will be printed to the log file", type=int, default=0)
parser.add_argument("--maxReadLength", dest="rlen", help="Maximum read length", type=int, default=100)
parser.add_argument("--logFileName", dest="logFileName", help="Name of log file", default="log.txt")
parser.add_argument("--nCPU", dest="nCPU", help="Number of processors to use", type=int, default=1)
parser.add_argument("--parseNCBI", dest="parseNCBI", help="", type=int, default=0)
parser.add_argument('--door', type=int, choices=range(1, 4))
parser.add_argument('--floor', choices=['dance', 'rock', 'pop', 'metal'], help='baz help')
parser.add_argument('--true-feature', dest='feature', action='store_true')
parser.add_argument('--false-feature', dest='feature', action='store_false')
tool = Tool( parser )
tool.parse()
tool.convert_to_galaxy()
|
bgruening/argparse2galaxy
|
argparse2galaxy.py
|
Python
|
mit
| 14,043
|
[
"Galaxy"
] |
4c653aceae27320a419eeac5f769d2c3c125b9d693e4aad87cf363058602d111
|
#!/usr/bin/env python
# coding: utf-8
"""
pyvse
~~~~~
A high-level API designed for MarketWatch Virtual Stock Exchange games.
Author: Kevin Chen
Email: kvchen@berkeley.edu
Contributor: Andrew Han
Email: handrew@stanford.edu
"""
import requests
import re
import json
from datetime import datetime, date, timedelta
from time import mktime
from bs4 import BeautifulSoup
from math import fabs
STOCK_ACTIONS = ["Buy", "Sell", "Short", "Cover"]
TIME_DELAY = 5
BASE_URL = "http://www.marketwatch.com"
ID_URL = "https://id.marketwatch.com"
URL_SUFFIX = {
"status": BASE_URL + "/user/login/status",
"profile": BASE_URL + "/my",
"login": ID_URL + "/auth/submitlogin.json",
"game": BASE_URL + "/game/{0}",
"trade": BASE_URL + "/game/{0}/trade?week=1",
"submit_order": BASE_URL + "/game/{0}/trade/submitorder?week=1",
"holdings_info": BASE_URL + "/game/{0}/portfolio/Holdings?partial=True",
"value": BASE_URL + "/game/{0}/portfolio/Holdings"
}
def mw_url(suffix, *args):
return URL_SUFFIX[suffix].format(*args)
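# e.g. mw_url("trade", 1234) -> "http://www.marketwatch.com/game/1234/trade?week=1"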
class VSESession(object):
def __init__(self, delay = 5):
"""Initializes a VSESession, through which all API calls are routed.
@param delay: Seconds to delay scraping data to prevent rate-limiting
"""
self.session = requests.Session()
self.delay = delay
self.games = {}
def login(self, username, password):
"""Logs a VSESession into the Marketwatch VSE.
@param username: email used with the Marketwatch VSE
@param password: corresponding password
"""
userdata = {"username": username, "password": password}
r = self.session.get(mw_url("login"), params=userdata, verify=True)
# MarketWatch returns a validation URL, which we visit to complete
# the login handshake.
conf_url = json.loads(r.text)["url"]
try:
self.session.get(conf_url)
except requests.exceptions.ConnectionError as e:
print("An error may occur during day trading. This is normal.")
print(e.args[0].reason)
# Confirm that we have logged in successfully
if self.session.get(mw_url("status")).url != mw_url("profile"):
print("Invalid username/password combination.")
else:
print("Successful login!")
def game(self, game_id):
"""Returns a Game object.
@param game_id:
"""
if game_id in self.games:
return self.games[game_id]
else:
self.games[game_id] = Game(game_id, self)
return self.games[game_id]
class Game(object):
order_headers = {'Content-Type': 'application/json; charset=utf-8'}
def __init__(self, game_id, vse_session):
"""Creates a Game object, parented to a VSESession."""
self.game_id = game_id
self.vse_session = vse_session
self.positions = {} # array of stock objects
self.__updatePositions()
@property
def value(self):
r = self.vse_session.session.get(mw_url("value", self.game_id))
soup = BeautifulSoup(r.text)
worth = soup.find('ul', {"class": "performance"}).li.find('span', {"class": "data"}).getText()
worth = worth.replace("$", "").replace(",", "")
return float(worth)
def transaction(self, ticker, shares, action):
"""Carries out a transaction on a Stock object.
@param shares: Number of shares to be exchanged in this transaction.
@param action: Type of transaction to be carried out.
"""
stock = self.stock(ticker)
if action not in STOCK_ACTIONS:
print("Invalid stock action.")
return
payload = [{"Fuid": stock.trading_symbol,
"Shares": str(shares),
"Type": action}]
p = self.vse_session.session.post(mw_url("submit_order", self.game_id),
headers = self.order_headers, data=json.dumps(payload))
resp = json.loads(p.text)
if resp["succeeded"] == False:
print("Transaction for {0} failed. {1}"
.format(stock.symbol, resp["message"]))
return
self.__updatePositions()
def __getNumberOfSharesToInvest(self, ticker, moneyToSpend):
obj = self.stock(ticker)
price = obj.price
numSharesToInvest = int(moneyToSpend / price)
return numSharesToInvest
def rebalance(self, stockWeights):
self.__updatePositions()
value = self.value
if (len(self.positions) == 0):
for ticker in stockWeights:
weight = stockWeights[ticker]
moneyToSpend = weight * value
numSharesToBuy = self.__getNumberOfSharesToInvest(ticker, moneyToSpend)
self.transaction(ticker, numSharesToBuy, "Buy")
else:
# Note that we are assuming some amount of margin.
ownedTickers = self.__positionNames()
targetTickers = list(stockWeights.keys())
""" Positions we currently have that we need to exit completely """
for ticker in ownedTickers:
if ticker not in targetTickers:
numSharesOwned = self.positions[ticker].position
action = "Sell" if numSharesOwned > 0 else "Cover"
numSharesOwned = fabs(numSharesOwned)
self.transaction(ticker, numSharesOwned, action)
self.__updatePositions()
############################
ownedTickers = self.__positionNames()
targetTickers = list(stockWeights.keys())
""" Overlap between positions we have a little bit of and need to rebalance """
for ticker in ownedTickers:
if ticker in targetTickers:
weight = stockWeights[ticker]
moneyToInvest = weight * value
numSharesToHave = self.__getNumberOfSharesToInvest(ticker, moneyToInvest)
currentPosition = self.positions[ticker].position
action = "Buy"
if (currentPosition < 0):
if (numSharesToHave > currentPosition):
action = "Cover"
else:
action = "Short"
else:
if (numSharesToHave > currentPosition):
action = "Buy"
else:
action = "Sell"
difference = fabs(currentPosition - numSharesToHave)
self.transaction(ticker, difference, action)
self.__updatePositions()
############################
ownedTickers = self.__positionNames()
targetTickers = list(stockWeights.keys())
""" Positions we don't have that we need to initialize """
for ticker in targetTickers:
if ticker not in ownedTickers:
weight = stockWeights[ticker]
moneyToSpend = weight * value
numSharesToBuy = self.__getNumberOfSharesToInvest(ticker, moneyToSpend)
self.transaction(ticker, numSharesToBuy, "Buy")
self.__updatePositions()
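# Usage sketch (tickers and weights are hypothetical): positions absent from
# the dict are exited, overlapping ones are resized, missing ones are opened.
#   game.rebalance({"AAPL": 0.5, "MSFT": 0.3, "GOOG": 0.2})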
def __updatePositions(self):
r = self.vse_session.session.get(mw_url("holdings_info", self.game_id))
soup = BeautifulSoup(r.text)
try:
allRows = soup.find('table', {'class': 'highlight'}).tbody.findAll('tr')
except AttributeError:
allRows = []
for i in range(0, len(allRows)):
symbol = allRows[i]['data-ticker']
trading_symbol = allRows[i]['data-symbol']
numShares = int(float(allRows[i]['data-shares']))
tradeType = allRows[i]['data-type']
position = numShares
if (tradeType == "Short"):
position = numShares * -1
stockObj = self.stock(symbol, trading_symbol = trading_symbol, position = position)
self.positions[symbol] = (stockObj)
def __positionNames(self):
return list(self.positions.keys())
def buy(self, ticker, shares):
self.transaction(ticker, shares, "Buy")
def sell(self, ticker, shares):
self.transaction(ticker, shares, "Sell")
def short(self, ticker, shares):
self.transaction(ticker, shares, "Short")
def cover(self, ticker, shares):
self.transaction(ticker, shares, "Cover")
def stock(self, symbol, trading_symbol = None, position = 0):
return Stock(symbol, trading_symbol, position, self)
""" Really only functions so that we can get trading symbol easily for transactions within the Game object """
class Stock():
def __init__(self, symbol, trading_symbol, position, game):
"""
@param symbol: Normal ticker symbol of a stock
@param trading_symbol: the symbol that Marketwatch uses to trade
"""
self.symbol = symbol
self.game = game
self.trading_symbol = trading_symbol if trading_symbol is not None else self.get_trading_symbol()
self.position = position
def get_trading_symbol(self):
payload = {"search": self.symbol, "view": "grid", "partial": True}
p = self.game.vse_session.session.post(mw_url("trade", self.game.game_id), params=payload)
data = BeautifulSoup(p.text)
try:
symbol = data.find("div", {"class": "chip"})['data-symbol']
except (TypeError, KeyError):
print("Could not find symbol: %s." % self.symbol)
symbol = ""
self.trading_symbol = symbol
return symbol
# Retrieves the price of a stock by scraping Yahoo! Finance. Returns a float.
@property
def price(self):
standardTicker = self.symbol
yfID = "yfs_l84_" + standardTicker.lower()
try:
yfURL = "http://finance.yahoo.com/quotes/" + standardTicker
r = requests.get(yfURL)
soup = BeautifulSoup(r.text)
except Exception:
yfURL = "http://finance.yahoo.com/q?s=" + standardTicker + "&ql=1"
r = requests.get(yfURL)
soup = BeautifulSoup(r.text)
try:
price = soup.find("span", {"id": yfID}).getText()
price = float(price)
except AttributeError:
price = self.retrievePrice()
return price
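# Minimal end-to-end sketch (credentials and game id are hypothetical):
#   session = VSESession()
#   session.login("user@example.com", "s3cret")
#   game = session.game("my-game-id")
#   game.buy("AAPL", 10)
#   print(game.value)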
|
kvchen/pyvse
|
pyvse2.py
|
Python
|
mit
| 10,590
|
[
"VisIt"
] |
ce7cafb3e34df3bbe75057eda69599136d4eae0d816f06811e5ca8633dc87d7b
|
# -*- coding: utf-8 -*-
#
# abstar documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 11 12:45:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# from abstar.version import __version__
if sys.version_info[0] > 2:
from unittest.mock import MagicMock
else:
from mock import MagicMock
if os.environ.get('READTHEDOCS', None) == 'True':
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pygtk', 'gtk', 'gobject', 'argparse', 'numpy', 'nwalign', 'pandas', 'abutils', 'dask', 'dask.dataframe',
'abutils.utils', 'abutils.core', 'abutils.core.sequence', 'abutils.utils.log', 'abutils.utils.alignment',
'abutils.utils.codons', 'abutils.utils.pipeline', 'abutils.utils.decorators', 'abutils.utils.progbar',
'biopython', 'celery', 'pymongo', 'scikit-bio', 'BaseSpacePy', 'BaseSpacePy.api',
'BaseSpacePy.model', 'BaseSpacePy.api.BaseSpaceAPI', 'BaseSpacePy.model.QueryParameters',
'Bio', 'Bio.Align', 'Bio.Alphabet', 'Bio.SeqIO', 'Bio.Seq', 'Bio.SeqRecord',
'Bio.Blast', 'Bio.Blast.Applications']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
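# With these mocks registered, importing any module named above (e.g.
# 'import numpy' or 'from Bio import SeqIO') yields a MagicMock-backed stub,
# so autodoc can import abstar on Read the Docs without the compiled
# dependencies installed.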
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'abstar'
copyright = u'2018, Bryan Briney'
author = u'Bryan Briney'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.4'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'abstardoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'abstar.tex', u'abstar Documentation',
u'Bryan Briney', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'abstar', u'abstar Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'abstar', u'abstar Documentation',
author, 'abstar', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = ['members']
|
briney/abstar
|
docs/source/conf.py
|
Python
|
mit
| 10,545
|
[
"BLAST",
"Biopython",
"scikit-bio"
] |
1bc47f483b09e5a1cf3a92e939a162caeaf847f9afb209afbc77acc7a5295442
|
import sys,os,unittest,ctypes
from lammps import lammps, LMP_VAR_ATOM, LMP_STYLE_GLOBAL, LMP_TYPE_VECTOR, LAMMPS_DOUBLE_2D, LAMMPS_AUTODETECT
has_manybody=False
try:
machine=None
if 'LAMMPS_MACHINE_NAME' in os.environ:
machine=os.environ['LAMMPS_MACHINE_NAME']
lmp=lammps(name=machine)
has_manybody = lmp.has_style("pair","sw")
lmp.close()
except Exception:
pass
class PythonCommand(unittest.TestCase):
def setUp(self):
machine=None
if 'LAMMPS_MACHINE_NAME' in os.environ:
machine=os.environ['LAMMPS_MACHINE_NAME']
self.lmp=lammps(name=machine,
cmdargs=['-nocite',
'-log','none',
'-echo','screen',
'-var','zpos','1.5',
'-var','x','2'])
# create demo input strings and files
# a few commands to set up a box with a single atom
self.demo_input="""
region box block 0 $x 0 2 0 2
create_box 1 box
create_atoms 1 single 1.0 1.0 ${zpos}
"""
# another command to add an atom and use a continuation line
self.cont_input="""
create_atoms 1 single &
0.2 0.1 0.1
"""
self.demo_file='in.test'
with open(self.demo_file,'w') as f:
f.write(self.demo_input)
self.cont_file='in.cont'
with open(self.cont_file,'w') as f:
f.write(self.cont_input)
# clean up temporary files
def tearDown(self):
if os.path.exists(self.demo_file):
os.remove(self.demo_file)
if os.path.exists(self.cont_file):
os.remove(self.cont_file)
##############################
def testFile(self):
"""Test reading commands from a file"""
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,0)
self.lmp.file(self.demo_file)
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,1)
self.lmp.file(self.cont_file)
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,2)
def testNoFile(self):
"""Test (not) reading commands from no file"""
self.lmp.file(None)
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,0)
def testCommand(self):
"""Test executing individual commands"""
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,0)
cmds = self.demo_input.splitlines()
for cmd in cmds:
self.lmp.command(cmd)
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,1)
def testCommandsList(self):
"""Test executing commands from list of strings"""
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,0)
cmds = self.demo_input.splitlines()+self.cont_input.splitlines()
self.lmp.commands_list(cmds)
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,2)
def testCommandsString(self):
"""Test executing block of commands from string"""
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,0)
self.lmp.commands_string(self.demo_input+self.cont_input)
natoms = self.lmp.get_natoms()
self.assertEqual(natoms,2)
def testNeighborListSimple(self):
self.lmp.commands_string("""
units lj
atom_style atomic
atom_modify map array
boundary f f f
region box block 0 2 0 2 0 2
create_box 1 box""")
x = [ 1.0, 1.0, 1.0, 1.0, 1.0, 1.5 ]
types = [1, 1]
self.assertEqual(self.lmp.create_atoms(2, id=None, type=types, x=x), 2)
nlocal = self.lmp.extract_global("nlocal")
self.assertEqual(nlocal, 2)
self.lmp.commands_string("""
mass 1 1.0
velocity all create 3.0 87287
pair_style lj/cut 2.5
pair_coeff 1 1 1.0 1.0 2.5
neighbor 0.1 bin
neigh_modify every 20 delay 0 check no
run 0 post no""")
idx = self.lmp.find_pair_neighlist("lj/cut")
self.assertNotEqual(idx, -1)
self.assertEqual(self.lmp.find_pair_neighlist("morse"), -1)
nlist = self.lmp.get_neighlist(idx)
self.assertEqual(len(nlist), 2)
atom_i, numneigh_i, neighbors_i = nlist[0]
atom_j, numneigh_j, _ = nlist[1]
self.assertEqual(atom_i, 0)
self.assertEqual(atom_j, 1)
self.assertEqual(numneigh_i, 1)
self.assertEqual(numneigh_j, 0)
self.assertEqual(1, neighbors_i[0])
def testNeighborListHalf(self):
self.lmp.commands_string("""
boundary f f f
units real
region box block -5 5 -5 5 -5 5
create_box 1 box
mass 1 1.0
pair_style lj/cut 4.0
pair_coeff 1 1 0.2 2.0
""")
x = [ 0.0, 0.0, 0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -1.1,
0.0, 0.0, 1.0 ]
tags = [1, 2, 3, 4, 5, 6, 7]
types = [1, 1, 1, 1, 1, 1, 1]
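# create_atoms() takes coordinates as a flat list of 3N doubles
# (x1, y1, z1, x2, y2, z2, ...), so the 21 values above define 7 atoms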
self.assertEqual(self.lmp.create_atoms(7, id=tags, type=types, x=x), 7)
nlocal = self.lmp.extract_global("nlocal")
self.assertEqual(nlocal, 7)
self.lmp.command("run 0 post no")
self.assertEqual(self.lmp.find_pair_neighlist("lj/cut"),0)
nlist = self.lmp.get_neighlist(0)
self.assertEqual(nlist.size, 7)
for i in range(0,nlist.size):
idx, num, neighs = nlist.get(i)
self.assertEqual(idx,i)
self.assertEqual(num,nlocal-1-i)
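# a half neighbor list stores each pair once with the lower-index atom as
# owner, hence atom i sees nlocal-1-i of the remaining atoms here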
# look up neighbor list by atom index
num, neighs = nlist.find(2)
self.assertEqual(num,4)
self.assertIsNotNone(neighs)
# this one will fail
num, neighs = nlist.find(10)
self.assertEqual(num,-1)
self.assertIsNone(neighs)
@unittest.skipIf(not has_manybody,"Full neighbor list test for manybody potential")
def testNeighborListFull(self):
self.lmp.commands_string("""
boundary f f f
units metal
region box block -5 5 -5 5 -5 5
create_box 1 box
mass 1 1.0
pair_style sw
pair_coeff * * Si.sw Si
""")
x = [ 0.0, 0.0, 0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -1.1,
0.0, 0.0, 1.0 ]
tags = [1, 2, 3, 4, 5, 6, 7]
types = [1, 1, 1, 1, 1, 1, 1]
self.assertEqual(self.lmp.create_atoms(7, id=tags, type=types, x=x), 7)
nlocal = self.lmp.extract_global("nlocal")
self.assertEqual(nlocal, 7)
self.lmp.command("run 0 post no")
self.assertEqual(self.lmp.find_pair_neighlist("sw"),0)
nlist = self.lmp.get_neighlist(0)
self.assertEqual(nlist.size, 7)
for i in range(0,nlist.size):
idx, num, neighs = nlist.get(i)
self.assertEqual(idx,i)
self.assertEqual(num,nlocal-1)
@unittest.skipIf(not has_manybody,"Hybrid neighbor list test for manybody potential")
def testNeighborListHybrid(self):
self.lmp.commands_string("""
boundary f f f
units metal
region box block -5 5 -5 5 -5 5
create_box 2 box
mass * 1.0
pair_style hybrid/overlay morse 4.0 lj/cut 4.0 lj/cut 4.0 sw
pair_coeff * * sw Si.sw Si NULL
pair_coeff 1 2 morse 0.2 2.0 2.0
pair_coeff 2 2 lj/cut 1 0.1 2.0
pair_coeff * * lj/cut 2 0.01 2.0
""")
x = [ 0.0, 0.0, 0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -1.1,
0.0, 0.0, 1.0 ]
tags = [1, 2, 3, 4, 5, 6, 7]
types = [1, 1, 1, 1, 2, 2, 2]
self.assertEqual(self.lmp.create_atoms(7, id=tags, type=types, x=x), 7)
nlocal = self.lmp.extract_global("nlocal")
self.assertEqual(nlocal, 7)
self.lmp.command("run 0 post no")
# valid and invalid lookups
self.assertNotEqual(self.lmp.find_pair_neighlist("sw"),-1)
self.assertNotEqual(self.lmp.find_pair_neighlist("morse"),-1)
self.assertNotEqual(self.lmp.find_pair_neighlist("lj/cut",nsub=1),-1)
self.assertNotEqual(self.lmp.find_pair_neighlist("lj/cut",nsub=2),-1)
self.assertEqual(self.lmp.find_pair_neighlist("lj/cut"),-1)
self.assertEqual(self.lmp.find_pair_neighlist("hybrid/overlay"),-1)
self.assertNotEqual(self.lmp.get_neighlist(4).size,0)
self.assertEqual(self.lmp.get_neighlist(5).size,-1)
# full neighbor list for 4 type 1 atoms
# all have 3 type 1 atom neighbors
nlist = self.lmp.get_neighlist(self.lmp.find_pair_neighlist("sw"))
self.assertEqual(nlist.size, 4)
for i in range(0,nlist.size):
idx, num, neighs = nlist.get(i)
self.assertEqual(idx,i)
self.assertEqual(num,3)
# half neighbor list for all pairs between type 1 and type 2
# 4 type 1 atoms with 3 type 2 neighbors and 3 type 2 atoms without neighbors
nlist = self.lmp.get_neighlist(self.lmp.find_pair_neighlist("morse"))
self.assertEqual(nlist.size, 7)
for i in range(0,nlist.size):
idx, num, neighs = nlist.get(i)
if (i < 4): self.assertEqual(num,3)
else: self.assertEqual(num,0)
# half neighbor list between type 2 atoms only
# 3 pairs with 2, 1, 0 neighbors
nlist = self.lmp.get_neighlist(self.lmp.find_pair_neighlist("lj/cut",nsub=1))
self.assertEqual(nlist.size, 3)
for i in range(0,nlist.size):
idx, num, neighs = nlist.get(i)
self.assertEqual(num,2-i)
# half neighbor list between all pairs. same as simple lj/cut case
nlist = self.lmp.get_neighlist(self.lmp.find_pair_neighlist("lj/cut",nsub=2))
self.assertEqual(nlist.size, 7)
for i in range(0,nlist.size):
idx, num, neighs = nlist.get(i)
self.assertEqual(num,nlocal-1-i)
def testNeighborListCompute(self):
self.lmp.commands_string("""
boundary f f f
units real
region box block -5 5 -5 5 -5 5
create_box 1 box
mass 1 1.0
pair_style lj/cut 4.0
pair_coeff 1 1 0.2 2.0
compute dist all pair/local dist
fix dist all ave/histo 1 1 1 0.0 3.0 4 c_dist mode vector
thermo_style custom f_dist[*]
""")
x = [ 0.0, 0.0, 0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0,
0.0, -1.1, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -1.1,
0.0, 0.0, 1.0 ]
tags = [1, 2, 3, 4, 5, 6, 7]
types = [1, 1, 1, 1, 1, 1, 1]
self.assertEqual(self.lmp.create_atoms(7, id=tags, type=types, x=x), 7)
nlocal = self.lmp.extract_global("nlocal")
self.assertEqual(nlocal, 7)
self.lmp.command("run 0 post no")
# check compute data from histogram summary
nhisto = self.lmp.extract_fix("dist",LMP_STYLE_GLOBAL,LMP_TYPE_VECTOR,nrow=0)
nskip = self.lmp.extract_fix("dist",LMP_STYLE_GLOBAL,LMP_TYPE_VECTOR,nrow=1)
minval = self.lmp.extract_fix("dist",LMP_STYLE_GLOBAL,LMP_TYPE_VECTOR,nrow=2)
maxval = self.lmp.extract_fix("dist",LMP_STYLE_GLOBAL,LMP_TYPE_VECTOR,nrow=3)
# 21 pair distances counted, none skipped, smallest 1.0, largest 2.1
self.assertEqual(nhisto,21)
self.assertEqual(nskip,0)
self.assertEqual(minval,1.0)
self.assertEqual(maxval,2.1)
self.assertNotEqual(self.lmp.find_pair_neighlist("lj/cut"),-1)
self.assertNotEqual(self.lmp.find_compute_neighlist("dist"),-1)
self.assertEqual(self.lmp.find_compute_neighlist("xxx"),-1)
self.assertEqual(self.lmp.find_fix_neighlist("dist"),-1)
# the compute has a half neighbor list
nlist = self.lmp.get_neighlist(self.lmp.find_compute_neighlist("dist"))
self.assertEqual(nlist.size, 7)
for i in range(0,nlist.size):
idx, num, neighs = nlist.get(i)
self.assertEqual(idx,i)
self.assertEqual(num,nlocal-1-i)
def test_extract_box_non_periodic(self):
self.lmp.command("boundary f f f")
self.lmp.command("region box block 0 2 0 2 0 2")
self.lmp.command("create_box 1 box")
boxlo, boxhi, xy, yz, xz, periodicity, box_change = self.lmp.extract_box()
self.assertEqual(boxlo, [0.0, 0.0, 0.0])
self.assertEqual(boxhi, [2.0, 2.0, 2.0])
self.assertEqual(xy, 0.0)
self.assertEqual(yz, 0.0)
self.assertEqual(xz, 0.0)
self.assertEqual(periodicity, [0, 0, 0])
self.assertEqual(box_change, 0)
def test_extract_box_periodic(self):
self.lmp.command("boundary p p p")
self.lmp.command("region box block 0 2 0 2 0 2")
self.lmp.command("create_box 1 box")
boxlo, boxhi, xy, yz, xz, periodicity, box_change = self.lmp.extract_box()
self.assertEqual(boxlo, [0.0, 0.0, 0.0])
self.assertEqual(boxhi, [2.0, 2.0, 2.0])
self.assertEqual(xy, 0.0)
self.assertEqual(yz, 0.0)
self.assertEqual(xz, 0.0)
self.assertEqual(periodicity, [1, 1, 1])
self.assertEqual(box_change, 0)
def test_extract_box_triclinic(self):
self.lmp.command("boundary p p p")
self.lmp.command("region box prism 0 2 0 2 0 2 0.1 0.2 0.3")
self.lmp.command("create_box 1 box")
boxlo, boxhi, xy, yz, xz, periodicity, box_change = self.lmp.extract_box()
self.assertEqual(boxlo, [0.0, 0.0, 0.0])
self.assertEqual(boxhi, [2.0, 2.0, 2.0])
self.assertEqual(xy, 0.1)
self.assertEqual(xz, 0.2)
self.assertEqual(yz, 0.3)
self.assertEqual(periodicity, [1, 1, 1])
self.assertEqual(box_change, 0)
def test_reset_box(self):
self.lmp.command("boundary p p p")
self.lmp.command("region box block 0 2 0 2 0 2")
self.lmp.command("create_box 1 box")
self.lmp.command("change_box all triclinic")
self.lmp.command("change_box all xy final 0.1 yz final 0.2 xz final 0.3")
self.lmp.reset_box([0,0,0], [1,1,1], 0, 0, 0)
boxlo, boxhi, xy, yz, xz, periodicity, box_change = self.lmp.extract_box()
self.assertEqual(boxlo, [0.0, 0.0, 0.0])
self.assertEqual(boxhi, [1.0, 1.0, 1.0])
self.assertEqual(xy, 0)
self.assertEqual(yz, 0)
self.assertEqual(xz, 0)
self.assertEqual(periodicity, [1, 1, 1])
self.assertEqual(box_change, 0)
def test_extract_variable_equalstyle(self):
self.lmp.command("variable a equal 100")
a = self.lmp.extract_variable("a")
self.assertEqual(a, 100)
self.lmp.command("variable a equal 3.14")
a = self.lmp.extract_variable("a")
self.assertEqual(a, 3.14)
def test_extract_variable_atomstyle(self):
self.lmp.command("units lj")
self.lmp.command("atom_style atomic")
self.lmp.command("atom_modify map array")
self.lmp.command("boundary f f f")
self.lmp.command("region box block 0 2 0 2 0 2")
self.lmp.command("create_box 1 box")
x = [
1.0, 1.0, 1.0,
1.0, 1.0, 1.5
]
types = [1, 1]
self.assertEqual(self.lmp.create_atoms(2, id=None, type=types, x=x), 2)
self.lmp.command("variable a atom x*x+y*y+z*z")
a = self.lmp.extract_variable("a", "all", LMP_VAR_ATOM)
self.assertEqual(a[0], x[0]*x[0]+x[1]*x[1]+x[2]*x[2])
self.assertEqual(a[1], x[3]*x[3]+x[4]*x[4]+x[5]*x[5])
def test_get_thermo(self):
self.lmp.command("units lj")
self.lmp.command("atom_style atomic")
self.lmp.command("atom_modify map array")
self.lmp.command("boundary f f f")
self.lmp.command("region box block 0 2 0 2 0 2")
self.lmp.command("create_box 1 box")
x = [
1.0, 1.0, 1.0,
1.0, 1.0, 1.5
]
types = [1, 1]
self.lmp.create_atoms(2, id=None, type=types, x=x)
state = {
"step": 0,
"dt" : 0.005,
"time" : 0.0,
"atoms" : 2.0,
"vol" : 8.0,
"lx" : 2.0,
"ly" : 2.0,
"lz" : 2.0,
"xlo" : 0,
"xhi" : 2.0,
"ylo" : 0,
"yhi" : 2.0,
"zlo" : 0,
"zhi" : 2.0
}
for key, value in state.items():
result = self.lmp.get_thermo(key)
self.assertEqual(value, result, key)
def test_extract_global(self):
self.lmp.command("region box block -1 1 -2 2 -3 3")
self.lmp.command("create_box 1 box")
self.assertEqual(self.lmp.extract_global("units"), "lj")
self.assertEqual(self.lmp.extract_global("ntimestep"), 0)
self.assertEqual(self.lmp.extract_global("dt"), 0.005)
self.assertEqual(self.lmp.extract_global("boxxlo"), -1.0)
self.assertEqual(self.lmp.extract_global("boxxhi"), 1.0)
self.assertEqual(self.lmp.extract_global("boxylo"), -2.0)
self.assertEqual(self.lmp.extract_global("boxyhi"), 2.0)
self.assertEqual(self.lmp.extract_global("boxzlo"), -3.0)
self.assertEqual(self.lmp.extract_global("boxzhi"), 3.0)
self.assertEqual(self.lmp.extract_global("boxlo"), [-1.0, -2.0, -3.0])
self.assertEqual(self.lmp.extract_global("boxhi"), [1.0, 2.0, 3.0])
self.assertEqual(self.lmp.extract_global("sublo"), [-1.0, -2.0, -3.0])
self.assertEqual(self.lmp.extract_global("subhi"), [1.0, 2.0, 3.0])
self.assertEqual(self.lmp.extract_global("periodicity"), [1,1,1])
self.assertEqual(self.lmp.extract_global("triclinic"), 0)
self.assertEqual(self.lmp.extract_global("sublo_lambda"), None)
self.assertEqual(self.lmp.extract_global("subhi_lambda"), None)
self.assertEqual(self.lmp.extract_global("respa_levels"), None)
self.assertEqual(self.lmp.extract_global("respa_dt"), None)
# set and initialize r-RESPA
self.lmp.command("run_style respa 3 5 2 pair 2 kspace 3")
self.lmp.command("mass * 1.0")
self.lmp.command("run 1 post no")
self.assertEqual(self.lmp.extract_global("ntimestep"), 1)
self.assertEqual(self.lmp.extract_global("respa_levels"), 3)
self.assertEqual(self.lmp.extract_global("respa_dt"), [0.0005, 0.0025, 0.005])
# checks only for triclinic boxes
self.lmp.command("change_box all triclinic")
self.assertEqual(self.lmp.extract_global("triclinic"), 1)
self.assertEqual(self.lmp.extract_global("sublo_lambda"), [0.0, 0.0, 0.0])
self.assertEqual(self.lmp.extract_global("subhi_lambda"), [1.0, 1.0, 1.0])
def test_create_atoms(self):
self.lmp.command("boundary f p m")
self.lmp.command("region box block 0 10 0 10 0 10")
self.lmp.command("create_box 2 box")
# second atom is outside the box -> dropped
self.lmp.create_atoms(2, [1,2], [1,1], [1.0, 1.0, 3.0, 5.0, 8.0, 12.0])
self.assertEqual(self.lmp.get_natoms(),1)
# non-zero velocities
self.lmp.create_atoms(2, None, [2,2], [2.0, 2.0, 1.0, 3.0, 4.0, 6.0], v=[0.1, 0.2, 0.3, -0.1, -0.2, -0.3])
self.assertEqual(self.lmp.get_natoms(),3)
# first atom is dropped, the shrinkwrapped box is extended for the second atom, and the third atom is wrapped around the PBC.
self.lmp.create_atoms(3, [5,8,10], [1,2,1], [-1.0, 1.0, 3.0, 5.0, 8.0, 12.0, 1.0, -1.0, 1.0], shrinkexceed=True)
self.assertEqual(self.lmp.get_natoms(),5)
# set image flags
self.lmp.create_atoms(1, None, [2], [5.0, 8.0, 1.0], image=[self.lmp.encode_image_flags(1,0,-1)])
self.assertEqual(self.lmp.get_natoms(),6)
tag = self.lmp.extract_atom("id")
typ = self.lmp.extract_atom("type")
pos = self.lmp.extract_atom("x",LAMMPS_DOUBLE_2D)
vel = self.lmp.extract_atom("v",LAMMPS_DOUBLE_2D)
img = self.lmp.extract_atom("image",LAMMPS_AUTODETECT)
# expected results: tag, type, x, v, image
result = [ [ 1, 1, [1.0, 1.0, 3.0], [ 0.0, 0.0, 0.0], [0, 0, 0]],\
[ 2, 2, [2.0, 2.0, 1.0], [ 0.1, 0.2, 0.3], [0, 0, 0]],\
[ 3, 2, [3.0, 4.0, 6.0], [-0.1, -0.2, -0.3], [0, 0, 0]],\
[ 8, 2, [5.0, 8.0, 12.0], [ 0.0, 0.0, 0.0], [0, 0, 0]],\
[10, 1, [1.0, 9.0, 1.0], [ 0.0, 0.0, 0.0], [0, 0, 0]],\
[11, 2, [5.0, 8.0, 1.0], [ 0.0, 0.0, 0.0], [1, 0, -1]] ]
for i in range(len(result)):
self.assertEqual(tag[i],result[i][0])
self.assertEqual(typ[i],result[i][1])
for j in range(3):
self.assertEqual(pos[i][j],result[i][2][j])
self.assertEqual(vel[i][j],result[i][3][j])
self.assertEqual(self.lmp.decode_image_flags(img[i]), result[i][4])
##############################
if __name__ == "__main__":
unittest.main()
|
akohlmey/lammps
|
unittest/python/python-commands.py
|
Python
|
gpl-2.0
| 21,064
|
[
"LAMMPS"
] |
219a1d57bd38e16637c142431291c138ffb4ad1b7fdd8b9c8994c7408adc4e81
|
# This file is part of DmpBbo, a set of libraries and programs for the
# black-box optimization of dynamical movement primitives.
# Copyright (C) 2018 Freek Stulp
#
# DmpBbo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DmpBbo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DmpBbo. If not, see <http://www.gnu.org/licenses/>.
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import os, sys
# Include scripts for plotting
lib_path = os.path.abspath('../../../python/')
sys.path.append(lib_path)
from functionapproximators.FunctionApproximatorLWR import *
from functionapproximators.FunctionApproximatorRBFN import *
from functionapproximators.functionapproximators_plotting import *
from functionapproximators.BasisFunction import Gaussian
if __name__=='__main__':
"""Run some training sessions and plot results."""
# Generate training data
n_samples_per_dim = 25
inputs = np.linspace(0.0, 2.0,n_samples_per_dim)
targets = 3*np.exp(-inputs)*np.sin(2*np.square(inputs))
fa_names = ["RBFN","LWR"]
for fa_index in range(len(fa_names)):
fa_name = fa_names[fa_index]
#############################################
# PYTHON
# Initialize function approximator
if fa_name=="LWR":
intersection = 0.5
n_rfs = 9
fa = FunctionApproximatorLWR(n_rfs,intersection)
else:
intersection = 0.7
n_rfs = 9
fa = FunctionApproximatorRBFN(n_rfs,intersection)
# Train function approximator with data
fa.train(inputs,targets)
# Make predictions for the targets
outputs = fa.predict(inputs)
# Make predictions on a grid
n_samples_grid = 201
inputs_grid = np.linspace(0.0, 2.0,n_samples_grid)
outputs_grid = fa.predict(inputs_grid)
if fa_name=="LWR":
lines_grid = fa.getLines(inputs_grid)
activations_grid = fa.getActivations(inputs_grid)
# Plotting
fig = plt.figure(fa_index,figsize=(15,5))
fig.canvas.set_window_title(fa_name)
ax = fig.add_subplot(131)
ax.set_title('Training')
plotGridPredictions(inputs_grid,outputs_grid,ax,n_samples_grid)
plotDataResiduals(inputs,targets,outputs,ax)
plotDataTargets(inputs,targets,ax)
if fa_name=="LWR":
plotLocallyWeightedLines(inputs_grid,lines_grid,ax,n_samples_grid,activations_grid)
if fa_name=="RBFN":
plotBasisFunctions(inputs_grid,activations_grid,ax,n_samples_grid)
# Perturb the function approximator's model parameters and plot
ax = fig.add_subplot(132)
ax.set_title('Random perturbations around trained model')
plotGridPredictions(inputs_grid,outputs_grid,ax,n_samples_grid)
plotDataTargets(inputs,targets,ax)
values = fa.getParameterVectorSelected()
for ii in range(5):
# Generate random vector with values between 0.5 and 1.5
rand_vector = 0.5 + np.random.random_sample(values.shape)
fa.setParameterVectorSelected(rand_vector*values)
outputs_grid = fa.predict(inputs_grid)
line_handles = plotGridPredictions(inputs_grid,outputs_grid,ax,n_samples_grid)
plt.setp(line_handles,linewidth=1,color='black')
ax = fig.add_subplot(133)
ax.set_title('Random perturbations around 0')
for ii in range(5):
# Generate random vector with values between -0.5 and 0.5
rand_vector = -0.5 + np.random.random_sample(values.shape)
fa.setParameterVectorSelected(rand_vector)
outputs_grid = fa.predict(inputs_grid)
line_handles = plotGridPredictions(inputs_grid,outputs_grid,ax,n_samples_grid)
plt.setp(line_handles,linewidth=1,color='black')
plt.show()
|
stulp/dmpbbo
|
python/functionapproximators/tests/testParameterizable.py
|
Python
|
lgpl-2.1
| 4,550
|
[
"Gaussian"
] |
30a394293685fda33f161bae6b94756eac01dc9e809a319742d19473bd577f56
|
import os
from os.path import splitext
import lsc
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pyraf import iraf
from glob import glob
import datetime
import astropy.units as u
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table, vstack
from astropy.coordinates import SkyCoord
from astropy.visualization import ImageNormalize, ZScaleInterval
import requests
import json
import getpass
def weighted_avg_and_std(values, weights):
"""
Return the weighted average and standard deviation.
values, weights -- Numpy ndarrays with the same shape.
"""
average = np.average(values, weights=weights)
variance = np.average((values-average)**2, weights=weights) # Fast and numerically precise
return average, np.sqrt(variance)
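# Worked example (numbers are hypothetical):
#   weighted_avg_and_std(np.array([1.0, 2.0]), np.array([1.0, 3.0]))
#   average  = (1*1.0 + 3*2.0) / 4 = 1.75
#   variance = (1*(1.0-1.75)**2 + 3*(2.0-1.75)**2) / 4 = 0.1875 -> std ~ 0.433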
try:
hostname, username, passwd, database = lsc.mysqldef.getconnection('lcogt2')
conn = lsc.mysqldef.dbConnect(hostname, username, passwd, database)
except ImportError as e:
print(e)
print('try running one of these:')
print('pip install mysql-python')
print('conda install mysql-python')
except Exception as e:
print(e)
print('### warning: problem connecting to the database')
def run_getmag(imglist, _output='', _interactive=False, _show=False, _bin=1e-10, magtype='mag', snex2_upload=False, database='photlco'):
if len(imglist)==0:
print('error: no images selected')
return
if magtype == 'mag':
mtype = 'mag'
mtypeerr = 'dmag'
elif magtype == 'fit':
mtype = 'psfmag'
mtypeerr = 'psfdmag'
elif magtype == 'ph':
mtype = 'apmag'
mtypeerr = 'psfdmag'
setup = {}
mag, dmag, mjd, filt, tel, date, filename = [], [], [], [], [], [], []
z1, z2 = [], []
magtype = []
for img in imglist:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
if ggg[0][mtype]:
if np.abs(ggg[0][mtype]) <= 99:
mag.append(ggg[0][mtype])
dmag.append(ggg[0][mtypeerr])
mjd.append(ggg[0]['mjd'])
filename.append(img)
filt.append(ggg[0]['filter'])
tel.append(ggg[0]['telescope'])
date.append(ggg[0]['dateobs'])
z1.append(ggg[0]['z1'])
z2.append(ggg[0]['z2'])
magtype.append(ggg[0]['magtype'])
if tel[-1] not in setup: setup[tel[-1]] = {}
if filt[-1] not in setup[tel[-1]]: setup[tel[-1]][filt[-1]] = {}
# These three things should be the same for all the images in imglist
ftype = ggg[0]['filetype']
dtype = ggg[0]['difftype']
# Get name of target:
namequery = lsc.mysqldef.getfromdataraw(conn, 'targetnames', 'targetid', str(ggg[0]['targetid']))
targetname = namequery[0]['name']
tables = []
for _tel in setup:
for _fil in setup[_tel]:
mjd0 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(mjd))
mag0 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(mag))
dmag0 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(dmag))
date0 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(date))
filename0 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(filename))
z10 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(z1))
z20 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(z2))
magtype0 = np.compress((np.array(filt) == _fil) & (np.array(tel) == _tel), np.array(magtype))
inds = np.argsort(mjd0)
mag0 = np.take(mag0, inds)
dmag0 = np.take(dmag0, inds)
date0 = np.take(date0, inds)
filename0 = np.take(filename0, inds)
mjd0 = np.take(mjd0, inds)
z10 = np.take(z10, inds)
z20 = np.take(z20, inds)
magtype0 = np.take(magtype0, inds)
# z3=
magtype1, mag1, dmag1, mjd1, date1, filename1 = [], [], [], [], [], []
done = []
for i in range(0, len(mjd0)):
if i not in done:
ww = np.array([j for j in range(len(mjd0)) if (mjd0[j] - mjd0[i]) < _bin and (
mjd0[j] - mjd0[i]) >= 0.0]) # np.abs(jd0[j]-jd0[i])<bin])
for jj in ww: done.append(jj)
if len(ww) >= 2:
mjd1.append(np.mean(mjd0[ww]))
                    av, st = weighted_avg_and_std(mag0[ww], 1 / dmag0[ww] ** 2)
                    print av, st
                    mag1.append(av)
                    try:
                        # the weighted standard deviation underestimates the true
                        # uncertainty; add 0.01 mag to take into account the error
                        # in the zero point
                        dmag1.append(st + 0.01)
                    except:
                        dmag1.append(0.0)
                    magtype1.append(np.mean(magtype0[ww]))
filename1.append(filename0[ww])
date1.append(min(date0[ww]) + (max(date0[ww]) - min(date0[ww])) / 2)
elif len(ww) == 1:
mjd1.append(mjd0[ww][0])
mag1.append(mag0[ww][0])
magtype1.append(magtype0[ww][0])
dmag1.append(dmag0[ww][0])
date1.append(date0[ww][0])
filename1.append(filename0[ww][0])
setup[_tel][_fil]['mag'] = mag1
setup[_tel][_fil]['magtype'] = magtype1
setup[_tel][_fil]['dmag'] = dmag1
setup[_tel][_fil]['mjd'] = list(np.array(mjd1))
setup[_tel][_fil]['jd'] = list(np.array(mjd1) + 2400000.5)
setup[_tel][_fil]['date'] = date1
setup[_tel][_fil]['filename'] = filename1
table = Table([date1, np.array(mjd1) + 2400000.5, mag1, dmag1], names=['dateobs', 'jd', 'mag', 'dmag'])
table['telescope'] = _tel
table['filter'] = lsc.sites.filterst1[_fil]
table['magtype'] = magtype1
tables.append(table)
if _show:
plotfast2(setup)
elif _output:
plotfast(setup, _output)
output_table = vstack(tables)
output_table.sort('jd')
output_table['jd'].format = '%.5f'
output_table['mag'].format = '%.4f'
output_table['dmag'].format = '%.4f'
if _output:
output_table.write(_output, format='ascii')
if snex2_upload:
### Make it snex2 readable
os.system('format_snex2.py -n ' + _output)
snex2_filename = splitext(_output)[0] + '_snex2.csv'
upload_extras = {
'reduction_type': 'manual',
'instrument': 'LCO'
}
if int(ftype)==1:
phot_type = raw_input('The default photometry type for non-difference imaging is PSF. If this is not PSF photometry please enter "Aperture", "Mixed", or "Unsure": ')
if phot_type.lower() == 'aperture':
upload_extras['photometry_type'] = 'Aperture'
elif phot_type.lower() == 'mixed':
upload_extras['photometry_type'] = 'Mixed'
elif phot_type.lower() == 'unsure':
upload_extras['photometry_type'] = 'Unsure'
else:
upload_extras['photometry_type'] = 'PSF'
elif int(ftype)==3:
phot_type = raw_input('The default photometry type for difference imaging is Aperture. If this is not Aperture photometry please enter "PSF", "Mixed", or "Unsure": ')
if phot_type.lower() == 'psf':
upload_extras['photometry_type'] = 'PSF'
elif phot_type.lower() == 'mixed':
upload_extras['photometry_type'] = 'Mixed'
elif phot_type.lower() == 'unsure':
upload_extras['photometry_type'] = 'Unsure'
else:
upload_extras['photometry_type'] = 'Aperture'
upload_extras['background_subtracted'] = True
if int(dtype)==0:
upload_extras['subtraction_algorithm'] = 'Hotpants'
else:
upload_extras['subtraction_algorithm'] = 'PyZOGY'
template_source = raw_input('Please enter the source of the template used to perform background subtraction (i.e. LCO or SDSS): ')
upload_extras['template_source'] = template_source
### Prompt user for inputs for a few upload extras
reducer_group = raw_input('Please enter the group reducing this data (i.e. LCO, UC Davis, etc.): ')
if reducer_group!='LCO':
upload_extras['reducer_group'] = reducer_group
            final_photometry = raw_input('Is this reduction final? [y/n] ')
if final_photometry == 'y':
upload_extras['final_reduction'] = True
else:
upload_extras['final_reduction'] = False
used_in = raw_input('Will this data be used in a paper? If so, please enter the name of the first author like "Last Name, First Name": ')
if used_in:
upload_extras['used_in'] = used_in
print(upload_extras)
print("\n")
default_groups = [
'ANU', 'ARIES', 'CSP', 'CU Boulder', 'e/PESSTO', 'ex-LCOGT', 'KMTNet', 'LBNL', 'LCOGT', 'LSQ', 'NAOC', 'Padova', 'QUB', 'SAAO', 'SIRAH', 'Skymapper', 'Tel Aviv U', 'U Penn', 'UC Berkeley', 'US GSP', 'UT Austin'
]
print('This dataproduct will be assigned to the following groups. If you would like to change these group permissions, you can do so on the page for this target on SNEx2: {}'.format(default_groups))
### Upload to snex2
username = raw_input('Please enter your SNEx2 username: ')
password = getpass.getpass(prompt='Please enter your SNEx2 password: ')
# Send the request
snex2_upload_url = 'http://test.supernova.exchange/pipeline-upload/photometry-upload/'
r = requests.post(snex2_upload_url, data={'targetname': targetname, 'data_product_type': 'photometry', 'upload_extras': json.dumps(upload_extras), 'username': username}, files={'file': (snex2_filename, open(os.getcwd() + '/' + snex2_filename, 'rb'))}, auth=(username, password))
if r.status_code == 201:
print('Upload successful')
else:
print('Error: Upload failed with code {}'.format(r.status_code))
print(r)
else:
output_table.pprint(max_lines=-1)
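# A minimal illustrative call (the filename is hypothetical and must already be
# ingested in photlco):
#   run_getmag(['elp1m008-fl05-20180101-0123-e91.fits'], _output='sn2018xx.txt',
#              _bin=0.5, magtype='mag')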
def run_cat(imglist, extlist, _interactive=False, stage='abscat', magtype='fit', database='photlco', field=None, refcat=None, force=False, minstars=0):
if len(extlist) > 0:
f = open('_tmpext.list', 'w')
for img in extlist:
if checkstage(img, 'zcat'):
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img, '*')
f.write(ggg[0]['filepath'] + img.replace('fits', 'sn2.fits') + '\n')
f.close()
f = open('_tmp.list', 'w')
for img in imglist:
if checkstage(img, 'psf'):
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img, '*')
f.write(ggg[0]['filepath'] + img.replace('fits', 'sn2.fits') + '\n')
f.close()
command = 'calibratemag.py _tmp.list -s {} -t {}'.format(stage, magtype)
if field:
command += ' -f ' + field
if len(extlist):
command += ' -e _tmpext.list'
if _interactive:
command += ' -i'
if force:
command += ' -F'
if refcat:
command += ' -c ' + refcat
if minstars:
command += ' --minstars ' + str(minstars)
print command
os.system(command)
def run_wcs(imglist, interactive=False, redo=False, _xshift=0, _yshift=0, catalogue='', database='photlco', mode='sv'):
for img in imglist:
status = checkstage(img, 'wcs')
if status == -4 and redo:
print 'wcs not good, try again'
lsc.mysqldef.updatevalue(database, 'quality', 0, os.path.split(img)[-1])
status = checkstage(img, 'wcs')
if status >= -1:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img, '*')
_dir = ggg[0]['filepath']
if mode =='sv':
command = 'lscastro.py ' + _dir + img + ' -m vizir --xshift ' + str(_xshift) + ' --yshift ' + str(_yshift)
if status == 0 or redo:
command += ' -r'
if interactive:
command += ' -i'
if catalogue:
command += ' -c ' + catalogue
else:
for field in ['apass', 'sloan', 'landolt']:
_catalogue = lsc.util.getcatalog(_dir + img, field)
if _catalogue:
command += ' -c ' + _catalogue
break
print command
os.system(command)
elif mode == 'astrometry':
lsc.lscastrodef.run_astrometry(_dir + img, True, redo)
else:
                print str(mode) + ' not defined'
elif status == 0:
print 'status ' + str(status) + ': WCS stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
def run_psf(imglist, treshold=5, interactive=False, _fwhm='', show=False, redo=False, fix=True,
catalog='', database='photlco', use_sextractor=False, datamin=None, datamax=None,
nstars=6, banzai=False, b_sigma=3.0, b_crlim=3.0, max_apercorr=0.1):
for img in imglist:
if interactive:
ii = '-i'
else:
ii = ''
if show:
ss = '-s'
else:
ss = ''
if redo:
rr = '-r'
else:
rr = ''
if _fwhm:
ff = '-f ' + str(_fwhm) + ' '
else:
ff = ''
if fix:
gg = ' '
else:
gg = ' --fix '
if catalog:
cc=' --catalog '+catalog+' '
else:
cc=' '
if use_sextractor:
xx = ' --use-sextractor '
else:
xx = ''
if banzai:
bz = ' --banzai --b_sigma={0} --b_crlim={1} '.format(b_sigma,b_crlim)
else:
bz = ''
if datamin is not None:
dmin = ' --datamin ' + str(datamin) + ' '
else:
dmin = ' '
if datamax is not None:
dmax = ' --datamax ' + str(datamax) + ' '
else:
dmax = ' '
pp = ' -p ' + str(nstars) + ' '
add_max_apercorr = ' --max_apercorr {}'.format(max_apercorr)
status = checkstage(img, 'psf')
print 'status= ',status
if status == 1:
rr = '-r'
if status >= 1:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img, '*')
_dir = ggg[0]['filepath']
            # filetype is an integer; it should not be passed as a string
if ggg[0]['filetype'] == 3 and ggg[0]['difftype'] == 0: # HOTPANTS difference images
##################################################################################
print '\n### get parameters for difference image'
hdrdiff=lsc.util.readhdr(_dir+img)
if 'PSF' not in hdrdiff:
raise Exception('PSF file not defined')
imgpsf=hdrdiff['PSF']
print '\n### psf file for difference image: '+imgpsf
statuspsf = checkstage(imgpsf, 'psf')
print statuspsf
if statuspsf == 2:
print 'psf file for difference image found'
gggpsf = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(imgpsf), '*')
_dirpsf = gggpsf[0]['filepath']
os.system('cp '+_dirpsf+imgpsf.replace('.fits', '.psf.fits')+' '+_dir+
img.replace('.fits', '.psf.fits'))
lsc.mysqldef.updatevalue('photlco', 'psf', img.replace('.fits', '.psf.fits'),
os.path.basename(img))
print '\n ### copy '+imgpsf.replace('.fits', '.psf.fits')+' in '+img.replace('.fits', '.psf.fits')
else:
                print '\n### ERROR: PSF file not found \n please run the psf stage on image: '+imgpsf
#####################################################################################
if 'PHOTNORM' not in hdrdiff:
raise Exception('PHOTNORM file not defined')
else:
photnorm=hdrdiff['PHOTNORM']
if photnorm=='t':
print '\n ### zero point done with template'
imgtable = hdrdiff['TEMPLATE']
elif photnorm=='i':
print '\n ### zero point done with target'
imgtable = hdrdiff['TARGET']
sntable = imgtable.replace('.fits','.sn2.fits')
gggtable = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(imgtable), '*')
dirtable = gggtable[0]['filepath']
if os.path.isfile(dirtable+sntable):
print '\n ### fits table found '
print 'cp ' + dirtable + sntable + ' ' + _dir + img.replace('.fits', '.sn2.fits')
os.system('cp ' + dirtable + sntable + ' ' + _dir + img.replace('.fits', '.sn2.fits'))
else:
raise Exception('fits table not there, run psf for ' + sntable)
else: # PyZOGY difference images or unsubtracted images
command = 'lscpsf.py ' + _dir + img + ' ' + ii + ' ' + ss + ' ' + rr + ' ' + ff + ' ' + '-t ' + str(
treshold) + gg + cc + xx + dmin + dmax + pp + bz + add_max_apercorr
print command
os.system(command)
elif status == 0:
print 'status ' + str(status) + ': WCS stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
# #################################################################
def run_fit(imglist, _ras='', _decs='', _xord=3, _yord=3, _bkg=4, _size=7, _recenter=False, _ref='', interactive=False,
show=False, redo=False, dmax=None, dmin=None, database='photlco', _ra0='', _dec0=''):
if interactive:
ii = ' -i'
else:
ii = ''
if _recenter:
cc = ' -c'
else:
cc = ''
if show:
ss = ' -s'
else:
ss = ''
if redo:
rr = ' -r'
else:
rr = ''
if _ras:
_ras = ' -R ' + str(_ras)
if _decs:
_decs = ' -D ' + str(_decs)
if _ra0:
_ra0 = ' --RA0 ' + str(_ra0)
if _dec0:
_dec0 = ' --DEC0 ' + str(_dec0)
if dmax is not None:
_dmax = ' --datamax ' + str(dmax)
else:
_dmax = ''
if dmin is not None:
_dmin = ' --datamin ' + str(dmin)
else:
_dmin = ''
for img in imglist:
status = checkstage(img, 'psfmag')
print status
if status >= 1:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
_dir = ggg[0]['filepath']
img0 = img.replace('.fits', '.sn2.fits')
if _ref:
print img0, _ref, show
_ras, _decs = lsc.myloopdef.getcoordfromref(img0, _ref, show)
command = 'lscsn.py ' + _dir + img + ii + ss + rr + cc + ' -x ' + str(_xord) + ' -y ' + str(_yord) + \
_ras + _decs + _ra0 + _dec0 + ' -b ' + str(_bkg) + ' -z ' + str(_size) + _dmax + _dmin
#if str(ggg[0]['filetype']) == '3':
# try:
# img2 = fits.getheader(_dir + img)['PSF']
# ggg2 = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img2), '*')
# _dir2 = ggg2[0]['filepath']
# pp = ' -p ' + _dir2 + img2.replace('.fits', '.psf.fits') + ' '
# command = command + pp
# except:
# command = ''
# print 'PSF header not found in ' + str(img)
print command
os.system(command)
elif status == 0:
print 'status ' + str(status) + ': psf stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
# #################################################################
def checkstage(img, stage, database='photlco'):
# -4 bad quality
# -3 image not ingested in the dedu table
# -2 image not in the working directory
# -1 sn2 not in the working directory
# 0 not done and previous stage not done
# 1 not done and possible since previous stage done
# 2 done and possible to do again
# 3 local sequence catalogue available
status = 0
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
if not ggg:
status = -3 # not in the redo table
else:
_dir = ggg[0]['filepath']
if ggg[0]['quality'] == 1:
status = -4 # bad quality image
else:
if not os.path.isfile(_dir + img):
status = -2 # fits image not in working dir
elif not os.path.isfile(_dir + img.replace('.fits', '.sn2.fits')):
status = -1 # sn2 table not in working dir
if stage == 'wcs' and status >= -1:
if ggg[0]['wcs'] != 0:
status = 1
else:
status = 2
elif stage == 'psf' and status >= -1 and ggg[0]['wcs'] == 0:
if ggg[0]['psf'] == 'X':
status = 1
else:
status = 2
elif stage == 'psfmag' and status >= 0 and ggg[0]['psf'] != 'X' and ggg[0]['wcs'] == 0:
        if ggg[0]['psfmag'] == 9999:
status = 1
else:
status = 2
elif stage == 'zcat' and status >= 0 and ggg[0]['psf'] != 'X' and ggg[0]['wcs'] == 0:
if ggg[0]['zcat'] == 'X':
status = 1
else:
status = 2
elif stage == 'mag' and status >= 0 and ggg[0]['zcat'] != 'X' and ggg[0]['psfmag'] != 9999:
if ggg[0]['mag'] == 9999:
status = 1
else:
status = 2
elif stage == 'abscat' and status >= 0 and ggg[0]['zcat'] != 'X' and ggg[0]['psf'] != 'X':
if ggg[0]['abscat'] == 'X':
status = 1 # mag should be replaced with 'cat'
else:
status = 2
elif stage == 'checkpsf' and status >= 0 and ggg[0]['psf'] != 'X' and ggg[0]['wcs'] == 0:
status = 1
elif stage == 'checkmag' and status >= 0 and ggg[0]['psf'] != 'X' and ggg[0]['wcs'] == 0:
if ggg[0]['psfmag'] == 9999:
status = 1
else:
status = 2
else:
pass
return status
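# Illustrative use of the status codes above, e.g. to run a stage only when its
# prerequisites are met:
#   for img in imglist:
#       if checkstage(img, 'psf') >= 1:
#           pass  # safe to run (or re-run) the psf stage on img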
# ###################################################################################
def getcoordfromref(img2, img1, _show, database='photlco'):
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
ggg1 = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img1.replace('sn2.fits', 'fits'), '*')
_dir1 = ggg1[0]['filepath']
ggg2 = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img2.replace('sn2.fits', 'fits'), '*')
_dir2 = ggg2[0]['filepath']
print _dir1, _dir2, img1, img2
dicti1 = lsc.lscabsphotdef.makecatalogue([_dir1 + img1])
dicti2 = lsc.lscabsphotdef.makecatalogue([_dir2 + img2])
for i in dicti1.keys():
for j in dicti1[i].keys():
ra01 = dicti1[i][j]['ra0']
dec01 = dicti1[i][j]['dec0']
for i in dicti2.keys():
for j in dicti2[i].keys():
ra02 = dicti2[i][j]['ra0']
dec02 = dicti2[i][j]['dec0']
print _dir1 + img1
t = fits.open(_dir1 + img1)
tbdata = t[1].data
hdr1 = t[0].header
psfx1 = lsc.util.readkey3(hdr1, 'PSFX1')
psfy1 = lsc.util.readkey3(hdr1, 'PSFY1')
print psfx1, psfy1, 'dddd'
    if psfx1 is not None and psfy1 is not None:
lll = str(psfx1) + ' ' + str(psfy1)
aaa = iraf.wcsctran('STDIN', 'STDOUT', _dir1 + img1 + '[0]', Stdin=[lll], inwcs='logical', units='degrees degrees',
outwcs='world', columns='1 2', formats='%10.5f %10.5f', Stdout=1)[3]
rasn1, decsn1 = aaa.split()
if _show:
iraf.set(stdimage='imt8192')
iraf.display(_dir1 + img1.replace('sn2.fits', 'fits') + '[0]', 1, fill=True, Stdout=1, zsc='yes',
zra='yes') #,z1=0,z2=3000)
iraf.tvmark(1, 'STDIN', Stdin=[lll], mark="cross", number='no', label='no', radii=5, nxoffse=5, nyoffse=5,
color=205, txsize=1)
# ra01,dec01,ra02,dec02=np.array(ra01,float),np.array(dec01,float),np.array(ra02,float),np.array(dec02,float)
distvec, pos1, pos2 = lsc.lscastrodef.crossmatch(list(ra01), list(dec01), list(ra02), list(dec02), 5)
# plt.ion()
# plt.plot(ra01,dec01,'or')
# plt.plot(ra02,dec02,'xb',markersize=10)
# plt.plot(np.array(ra01)[pos1],np.array(dec01)[pos1],'3g',markersize=20)
# plt.plot(np.array(ra02)[pos2],np.array(dec02)[pos2],'*m',markersize=10)
# raw_input('ddd')
rra = ra01[pos1] - ra02[pos2]
ddec = dec01[pos1] - dec02[pos2]
rracut = np.compress((np.abs(np.array(ra02[pos2]) - float(rasn1)) < .05) & (np.abs(np.array(dec02[pos2]) - float(decsn1)) < .05),
np.array(rra))
ddeccut = np.compress((np.abs(np.array(ra02[pos2]) - float(rasn1)) < .05) & (np.abs(np.array(dec02[pos2]) - float(decsn1)) < .05),
np.array(ddec))
if len(rracut) > 10:
rasn2c = float(rasn1) - np.median(rracut)
decsn2c = float(decsn1) - np.median(ddeccut)
else:
rasn2c = float(rasn1) - np.median(rra)
decsn2c = float(decsn1) - np.median(ddec)
if _show:
print 'shift in arcsec (ra dec)'
print len(pos1), len(ra01)
print np.median(rra), np.median(ddec)
print 'SN position on image 2 computed'
print rasn2c, decsn2c
iraf.display(_dir2 + img2.replace('sn2.fits', 'fits') + '[0]', 2, fill=True, Stdout=1, zsc='yes',
zra='yes') #,z1=0,z2=3000)
lll = str(rasn2c) + ' ' + str(decsn2c)
bbb = iraf.wcsctran('STDIN', 'STDOUT', _dir2 + img2 + '[0]', Stdin=[lll], inwcs='world', units='degrees degrees',
outwcs='logical', columns='1 2', formats='%10.5f %10.5f', Stdout=1)[3]
iraf.tvmark(2, 'STDIN', Stdin=[bbb], mark="cross", number='no', label='no', radii=5, nxoffse=5, nyoffse=5,
color=206, txsize=1)
plt.ion()
plt.plot(rra, ddec, 'or')
plt.plot(rracut, ddeccut, 'xb')
return rasn2c, decsn2c
def filtralist(ll2, _filter, _id, _name, _ra, _dec, _bad, _filetype=1, _groupid='', _instrument='', _temptel='', _difftype=None, classid=None, _targetid=None):
ll1 = {}
for key in ll2.keys():
ll1[key] = ll2[key][:]
if _filetype == 3 and _difftype is not None:
ww = np.array([i for i in range(len(ll1['difftype'])) if ll1['difftype'][i] == _difftype])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _filetype:
if int(_filetype) in [1, 2, 3, 4]:
ww = np.array([i for i in range(len(ll1['filetype'])) if ((ll1['filetype'][i] == int(_filetype)))])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _bad and _bad == 'quality':
pass
else:
ww = np.array([i for i in range(len(ll1['quality'])) if ((ll1['quality'][i] != 1))])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _filter: #filter
lista = sum([lsc.sites.filterst[fil] for fil in _filter.split(',')], [])
print lista
ww = np.array([i for i in range(len(ll1['filter'])) if ll1['filter'][i] in lista])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _id: # ID
lista = range(int(_id.split('-')[0]),int(_id.split('-')[-1])+1)
print lista
ww = np.array([i for i in range(len(ll1['filter'])) if ((int(ll1['filename'][i].split('-')[3]) in lista))])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _name: # name
_targetid = lsc.mysqldef.gettargetid(_name, '', '', conn, 0.01, False)
ww = np.array([i for i in range(len(ll1['filter'])) if ((_targetid == ll1['targetid'][i]))])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _targetid: # target ID
ww = np.array([i for i in range(len(ll1['targetid'])) if ((ll1['targetid'][i]==int(_targetid)))])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _groupid:
ww = np.array([i for i in range(len(ll1['filter'])) if ((ll1['groupidcode'][i] != _groupid))])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if _ra and _dec:
ww = np.array([i for i in range(len(ll1['ra'])) if
( np.abs(float(ll1['ra'][i]) - float(_ra)) < .5 and np.abs(float(ll1['dec'][i]) - float(_dec)) < .5 )])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
####################
# add filter using instrument
if _instrument:
print _instrument
ww = np.array([i for i in range(len(ll1['instrument'])) if (_instrument in ll1['instrument'][i])])
if len(ww) > 0:
for jj in ll1.keys():
ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys():
ll1[jj] = []
if int(_filetype) == 3 and _temptel:
temptels = np.array([fn.replace('.optimal', '').split('.')[1].replace('diff', inst[:2])
# if fn.replace('.optimal', '').count('.') == 3 else inst[0:2]
for fn, inst in zip(ll1['filename'], ll1['instrument'])], dtype=str)
for jj in ll1:
ll1[jj] = np.array(ll1[jj])[temptels == _temptel]
if classid is not None:
standards = lsc.mysqldef.query(['select id from targets where classificationid='+str(classid)], lsc.conn)
standard_ids = [row['id'] for row in standards]
isstd = np.array([targetid in standard_ids for targetid in ll1['targetid']])
for jj in ll1:
ll1[jj] = np.array(ll1[jj])[isstd]
######################
if _bad:
if _bad == 'wcs':
ww = np.array([i for i in range(len(ll1[_bad])) if (ll1[_bad][i] != 0)])
elif _bad == 'zcat' or _bad == 'abscat':
ww = np.array([i for i in range(len(ll1[_bad])) if (ll1[_bad][i] == 'X' )])
elif _bad == 'goodcat':
ww = np.array([i for i in range(len(ll1['abscat'])) if (ll1['abscat'][i] != 'X' )])
elif _bad == 'psf':
ww = np.array([i for i in range(len(ll1['psf'])) if (ll1['psf'][i] == 'X' )])
elif _bad == 'quality':
ww = np.array([i for i in range(len(ll1['quality'])) if ((ll1['quality'][i] == 1))])
elif _bad == 'cosmic':
maskexists = [os.path.isfile(filepath+filename.replace('.fits', '.mask.fits'))
for filepath, filename in zip(ll1['filepath'], ll1['filename'])]
ww = np.flatnonzero(np.logical_not(maskexists))
elif _bad == 'diff':
include_pattern = '*{}{}.diff.fits'.format('.optimal' if _difftype == 1 else '', '.' + _temptel if _temptel else '*')
exclude_pattern = '*.optimal*.diff.fits' if _difftype == 0 else ''
ww = np.array([i for i, (filepath, filename) in enumerate(zip(ll1['filepath'], ll1['filename']))
if not (set(glob(filepath + filename.replace('.fits', include_pattern)))
- set(glob(filepath + filename.replace('.fits', exclude_pattern))))])
elif _bad == 'mag':
ww = np.array([i for i in range(len(ll1['mag'])) if (ll1['mag'][i] >= 1000 or ll1[_bad][i] < 0)])
else:
ww = np.array([i for i in range(len(ll1[_bad])) if (ll1[_bad][i] >= 1000)])
if len(ww) > 0:
for jj in ll1.keys(): ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys(): ll1[jj] = []
if _bad == 'psfmag': # do not consider standard field as bad psfmag files
ww = np.array([i for i in range(len(ll1['objname'])) if (
(ll1['objname'][i]) not in ['L104', 'L105', 'L95', 'L92', 'L106', 'L113',
'L101', 'L107', 'L110', 'MarkA', 'SA93',
's82_00420020', 's82_01030111', 'Ru152'])])
if len(ww) > 0:
for jj in ll1.keys(): ll1[jj] = np.array(ll1[jj])[ww]
else:
for jj in ll1.keys(): ll1[jj] = []
# if _bad in ['goodcat']:
# else:
# ww=np.array([i for i in range(len(ll1[_bad])) if ((ll1['quality'][i]!=1))])
# if len(ww)>0:
# for jj in ll1.keys(): ll1[jj]=np.array(ll1[jj])[ww]
# else:
# for jj in ll1.keys(): ll1[jj]=[]
return ll1
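# Illustrative call (the target name is hypothetical): keep only good-quality
# r-band images of one object from a dictionary of photlco rows.
#   lista = filtralist(lista, 'r', '', 'SN 2018xx', '', '', '', _filetype=1)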
##########################################################################
def position(imglist, ra1, dec1, show=False):
iraf.imcoords(_doprint=0)
ra, dec = [], []
if not ra1 and not dec1:
for img in imglist:
t = fits.open(img)
tbdata = t[1].data
hdr1 = t[0].header
psfx = lsc.util.readkey3(hdr1, 'PSFX1')
psfy = lsc.util.readkey3(hdr1, 'PSFY1')
            if psfx is not None and psfy is not None:
lll = str(psfx) + ' ' + str(psfy)
aaa = iraf.wcsctran('STDIN', 'STDOUT', img + '[0]', Stdin=[lll], inwcs='logical', units='degrees degrees',
outwcs='world', columns='1 2', formats='%10.5f %10.5f', Stdout=1)[3]
try:
ra.append(float(aaa.split()[0]))
dec.append(float(aaa.split()[1]))
except:
pass
else:
for img in imglist:
dicti = lsc.lscabsphotdef.makecatalogue([img])
for i in dicti.keys():
for j in dicti[i].keys():
ra0 = dicti[i][j]['ra']
dec0 = dicti[i][j]['dec']
ra00 = np.zeros(len(ra0))
dec00 = np.zeros(len(ra0))
for i in range(0, len(ra0)):
ra00[i] = float(iraf.real(ra0[i])) * 15
dec00[i] = float(iraf.real(dec0[i]))
distvec, pos0, pos1 = lsc.lscastrodef.crossmatch(np.array([float(ra1)]), np.array([float(dec1)]),
np.array(ra00, float), np.array(dec00, float), 5)
if len(pos1) >= 1:
ra.append(ra00[pos1[np.argmin(distvec)]])
dec.append(dec00[pos1[np.argmin(distvec)]])
print i, j, ra00[pos1[np.argmin(distvec)]], dec00[pos1[np.argmin(distvec)]]
# iraf.display(j.replace('.sn2.fits','.fits'),1,fill=True,Stdout=1)
# lll=str(ra00[pos1[np.argmin(distvec)]])+' '+str(dec00[pos1[np.argmin(distvec)]])
# aaa=iraf.wcsctran('STDIN','STDOUT',j,Stdin=[lll],inwcs='world',units='degrees degrees',outwcs='logical',columns='1 2',formats='%10.5f %10.5f',Stdout=1)[3]
# iraf.tvmark(1,'STDIN',Stdin=list([aaa]),mark="circle",number='yes',label='no',radii=20,nxoffse=5,nyoffse=5,color=205,txsize=2)
# raw_input('ddd')
if show:
plt.ion()
plt.xlabel('ra (arcsec)')
plt.ylabel('dec (arcsec)')
yticklabels = plt.getp(plt.gca(), 'yticklabels')
xticklabels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(xticklabels, fontsize='20')
plt.setp(yticklabels, fontsize='20')
plt.legend(numpoints=1, markerscale=1.5)
print np.median(dec)
plt.plot(((ra - np.median(ra)) * 3600) * np.cos(np.median(dec) * np.pi / 180.), (dec - np.median(dec)) * 3600, 'or',
label='position')
try:
ra, dec = np.mean(ra), np.mean(dec)
except:
ra = ''
dec = ''
return ra, dec
#########################################################################
def mark_stars_on_image(imgfile, catfile, fig=None):
data, hdr = fits.getdata(imgfile, header=True)
if fig is None:
fig = plt.gcf()
fig.clf()
ax = fig.add_subplot(1, 1, 1)
norm = ImageNormalize(data, interval=ZScaleInterval())
ax.imshow(data, norm=norm, origin='lower', cmap='bone')
wcs = WCS(hdr)
if catfile.endswith('fits'):
cat = Table.read(catfile)
else:
cat = Table.read(catfile, format='ascii.commented_header', header_start=1, delimiter='\s')
coords = SkyCoord(cat['ra'], cat['dec'], unit=(u.hourangle, u.deg))
i, j = wcs.wcs_world2pix(coords.ra, coords.dec, 0)
ax.autoscale(False)
ax.plot(i, j, marker='o', mec='r', mfc='none', ls='none')
ax.set_title(os.path.basename(imgfile))
fig.tight_layout()
psf_star_x, psf_star_y, psf_star_id = get_psf_star_coords(imgfile)
ax.plot(psf_star_x-1, psf_star_y-1, 'o', mec='b', mfc='none', ls='none') #subtract 1 for iraf --> python indexing
for ipsf_star_x, ipsf_star_y, ipsf_star_id in zip(psf_star_x, psf_star_y, psf_star_id):
ax.text(ipsf_star_x-1, ipsf_star_y-1, ipsf_star_id, color='b')
def get_psf_star_coords(imgfile):
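    """Read the PSF star positions stored by daophot in the .psf.fits header.

    Returns the (1-indexed) x and y pixel coordinates and the IDs of the
    NPSFSTAR stars, taken from the ID<n>/X<n>/Y<n> header keywords.
    """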
psf_file = imgfile.replace('.fits', '.psf.fits')
psf_hdr = fits.getheader(psf_file, 0)
npsfstars = psf_hdr['NPSFSTAR']
    ids = []
    x = np.empty(npsfstars)
    y = np.empty(npsfstars)
    for indx in range(npsfstars):
        starnum = indx + 1
        ids.append(psf_hdr['ID{}'.format(starnum)])
        x[indx] = psf_hdr['X{}'.format(starnum)]
        y[indx] = psf_hdr['Y{}'.format(starnum)]
    return x, y, ids
def checkcat(imglist, database='photlco'):
plt.ion()
plt.figure(figsize=(6, 6))
for img in imglist:
status = checkstage(img, 'checkpsf')
#print img,status
if status >= 1:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), 'filepath, abscat')
_dir = ggg[0]['filepath']
print _dir, img
catfile = _dir + img.replace('.fits', '.cat')
if os.path.isfile(catfile):
with open(catfile) as f:
lines = f.readlines()
print len(lines) - 2, 'stars in catalog'
if len(lines) > 2:
mark_stars_on_image(_dir + img, catfile)
aa = raw_input('>>>good catalogue [[y]/n] or [b] bad quality ? ')
if not aa: aa = 'y'
                else: # automatically delete the file if it is only a header
aa = 'n'
if aa in ['n', 'N', 'No', 'NO', 'bad', 'b', 'B']:
print 'updatestatus bad catalogue'
lsc.mysqldef.updatevalue(database, 'abscat', 'X', os.path.basename(img))
lsc.util.delete(_dir + img.replace('.fits', '.cat'))
if aa in ['bad', 'b', 'B']:
print 'updatestatus bad quality'
lsc.mysqldef.updatevalue(database, 'quality', 1, os.path.basename(img))
elif ggg[0]['abscat'] != 'X':
lsc.mysqldef.updatevalue(database, 'abscat', 'X', os.path.basename(img))
elif status == 0:
print 'status ' + str(status) + ': WCS stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
def checkpsf(imglist, no_iraf=False, database='photlco'):
if no_iraf:
plt.ion()
img_fig = plt.figure(figsize=(6, 6))
psf_fig = plt.figure(figsize=(8, 4))
else:
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
for img in imglist:
status = checkstage(img, 'checkpsf')
print img, status
if status >= 1:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
_dir = ggg[0]['filepath']
if os.path.isfile(_dir + img.replace('.fits', '.psf.fits')):
if no_iraf:
mark_stars_on_image(_dir + img, _dir + img.replace('.fits', '.sn2.fits'), fig=img_fig)
psf_filename = _dir + img.replace('.fits', '.psf.fits')
make_psf_plot(psf_filename, fig=psf_fig)
else:
lsc.util.marksn2(_dir + img, _dir + img.replace('.fits', '.sn2.fits'))
iraf.delete('_psf.psf.fits', verify=False)
iraf.seepsf(_dir + img.replace('.fits', '.psf.fits'), '_psf.psf')
iraf.surface('_psf.psf')
aa = raw_input('>>>good psf [[y]/n] or [b] bad quality ? ')
if not aa: aa = 'y'
if aa in ['n', 'N', 'No', 'NO', 'bad', 'b', 'B']:
print 'updatestatus bad'
lsc.mysqldef.updatevalue(database, 'psf', 'X', os.path.basename(img))
lsc.mysqldef.updatevalue(database, 'psfmag', 9999, os.path.basename(img))
if os.path.isfile(_dir + img.replace('.fits', '.psf.fits')):
print 'rm ' + _dir + img.replace('.fits', '.psf.fits')
os.system('rm ' + _dir + img.replace('.fits', '.psf.fits'))
if os.path.isfile(_dir + img.replace('.fits', '.sn2.fits')):
print 'rm ' + _dir + img.replace('.fits', '.sn2.fits')
os.system('rm ' + _dir + img.replace('.fits', '.sn2.fits'))
if aa in ['bad', 'b', 'B']:
print 'updatestatus bad quality'
lsc.mysqldef.updatevalue(database, 'quality', 1, os.path.basename(img))
elif status == 0:
print 'status ' + str(status) + ': PSF stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
def seepsf(psf_filename, saveto=None):
"""
Calculates PSF from header info plus residuals without using iraf
:param psf_filename: filepath+filename of psf file
:param saveto: filepath+filename of output file
"""
psf_hdul = fits.open(psf_filename)
N = psf_hdul[0].header['PSFHEIGH'] / psf_hdul[0].header['NPSFSTAR']
sigma_x = psf_hdul[0].header['PAR1']
sigma_y = psf_hdul[0].header['PAR2']
psfrad = psf_hdul[0].header['PSFRAD']
NAXIS1 = psf_hdul[0].header['NAXIS1']
NAXIS2 = psf_hdul[0].header['NAXIS2']
x = np.linspace(-psfrad, psfrad, num=NAXIS1)
y = np.linspace(-psfrad, psfrad, num=NAXIS2)
X, Y = np.meshgrid(x, y)
    # The PSF is an elliptical Gaussian (parameters from the header) plus
    # residuals (from the image data), as described at
    # https://iraf.net/irafhelp.php?val=seepsf
    # The normalization is not guaranteed exact, but it has been tested on
    # good and bad PSFs and appears correct.
analytic = N * np.exp(-(((X ** 2) / (sigma_x ** 2)) + ((Y ** 2) / (sigma_y ** 2))) / 2)
residual = psf_hdul[0].data
Z = analytic + residual
if saveto is not None:
psf_hdul[0].data = Z
psf_hdul.writeto(saveto, overwrite=True)
return X, Y, Z
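# Illustrative usage (the filenames are hypothetical):
#   X, Y, Z = seepsf('lsc1m005-kb78-20180101-0042-e91.psf.fits')
#   # or write the realized PSF out for inspection:
#   seepsf('lsc1m005-kb78-20180101-0042-e91.psf.fits', saveto='psf_realized.fits')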
def make_psf_plot(psf_filename, fig=None):
"""
Displays plots of PSFs for the checkpsf stage without using iraf
:param psf_filename: filepath+filename of psf file
"""
if fig is None:
fig = plt.gcf()
fig.clf()
X, Y, Z = seepsf(psf_filename)
ax = fig.add_subplot(1, 1, 1, projection='3d')
# ax.plot_wireframe(X, Y, Z) # the transparency makes this challenging to interpret
# replicate iraf look, much slower than wireframe
ax.plot_surface(X, Y, Z, antialiased=True, linewidth=.25, color='black', edgecolor='white')
ax.view_init(elev=40, azim=330) # replicating starting view of iraf PSF
ax.set_axis_off()
ax.set_title('PSF for {psf_filename}'.format(psf_filename=os.path.basename(psf_filename)))
fig.tight_layout()
#############################################################################
def checkwcs(imglist, force=True, database='photlco', _z1='', _z2=''):
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
iraf.set(stdimage='imt2048')
print force
print _z1, _z2
for img in imglist:
status = checkstage(img, 'wcs')
if status >= 0 or force == False:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
_dir = ggg[0]['filepath']
_filter = ggg[0]['filter']
_exptime = ggg[0]['exptime']
            if _z1 is not None and _z2 is not None:
iraf.display(_dir + img + '[0]', 1, fill=True, Stdout=1, zscale='no', zrange='no', z1=_z1, z2=_z2)
else:
iraf.display(_dir + img + '[0]', 1, fill=True, Stdout=1)
###########################################
_ra0, _dec0, _SN0 = lsc.util.checksnlist(_dir + img, 'supernovaelist.txt')
if not _SN0: _ra0, _dec0, _SN0 = lsc.util.checksnlist(_dir + img, 'standardlist.txt')
if not _SN0: _ra0, _dec0, _ = lsc.util.checksndb(img)
if _ra0 and _dec0:
ccc = iraf.wcsctran('STDIN', 'STDOUT', _dir + img + '[0]', Stdin=[str(_ra0) + ' ' + str(_dec0)], inwcs='world',
units='degrees degrees', outwcs='logical', \
columns='1 2', formats='%10.5f %10.5f', Stdout=1)
iraf.tvmark(1, 'STDIN', Stdin=list(ccc), mark="circle", number='yes', label='no', radii=15, nxoffse=5,
nyoffse=5, color=206, txsize=3)
for field in ['sloan', 'apass', 'landolt']:
_catalogue = lsc.util.getcatalog(_dir + img, field)
if _catalogue:
catvec = lsc.lscastrodef.readtxt(_catalogue)
bbb = [ra + ' ' + dec for ra, dec in zip(catvec['ra'], catvec['dec'])]
aaa = iraf.wcsctran('STDIN', 'STDOUT', _dir + img + '[0]', Stdin=bbb, inwcs='world', units='degrees degrees',
outwcs='logical', columns='1 2', formats='%10.5f %10.5f', Stdout=1)
iraf.tvmark(1, 'STDIN', Stdin=list(aaa), mark="cross", number='yes', label='no', radii=1, nxoffse=5,
nyoffse=5, color=204, txsize=1)
break
else:
catvec = lsc.lscastrodef.querycatalogue('usnoa2', _dir + img, 'vizir')
apix1 = catvec['pix']
iraf.tvmark(1, 'STDIN', Stdin=list(apix1), mark="circle", number='yes', label='no', radii=20, nxoffse=5,
nyoffse=5, color=205, txsize=2)
aa = raw_input('>>>good wcs [[y]/n] or [b] bad quality ? ')
if not aa:
aa = 'y'
if aa in ['n', 'N', 'No', 'NO', 'bad', 'b', 'B']:
print 'updatestatus bad'
lsc.mysqldef.updatevalue(database, 'wcs', 9999, os.path.basename(img))
lsc.mysqldef.updatevalue(database, 'psf', 'X', os.path.basename(img))
lsc.mysqldef.updatevalue(database, 'psfmag', 9999, os.path.basename(img))
if os.path.isfile(_dir + img.replace('.fits', '.psf.fits')):
print 'rm ' + _dir + img.replace('.fits', '.psf.fits')
os.system('rm ' + _dir + img.replace('.fits', '.psf.fits'))
if os.path.isfile(_dir + img.replace('.fits', '.sn2.fits')):
print 'rm ' + _dir + img.replace('.fits', '.sn2.fits')
os.system('rm ' + _dir + img.replace('.fits', '.sn2.fits'))
if aa in ['bad', 'b', 'B']:
print 'updatestatus bad quality'
lsc.mysqldef.updatevalue(database, 'quality', 1, os.path.basename(img))
elif aa in ['c', 'C', 'cancel']:
print 'remove from database'
os.system('rm ' + _dir + img)
lsc.mysqldef.deleteredufromarchive(os.path.basename(img), 'photlco', 'filename')
lsc.mysqldef.deleteredufromarchive(os.path.basename(img), 'photpairing', 'nameout')
# elif status==0: print 'status '+str(status)+': WCS stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
##################################################################
#############################################################################
def makestamp(imglist, database='photlco', _z1='', _z2='', _interactive=True, redo=False, _output=''):
for img in imglist:
_targetid = ''
status = lsc.checkstage(img, 'wcs')
if status >= 0: # or force==False:
ggg = lsc.mysqldef.getfromdataraw(lsc.conn, database, 'filename', str(img), '*')
_dir = ggg[0]['filepath']
_output = _dir + img.replace('.fits', '.png')
if os.path.isfile(_output):
if redo:
os.remove(_output)
else:
status = -5
if status >= 0: # or force==False:
_targetid = ggg[0]['targetid']
hdr = fits.open(_dir + img)
X = hdr[0].data
header = hdr[0].header
wcs = WCS(header)
_ra0, _dec0, _ = lsc.util.checksndb(img)
if _ra0 and _dec0:
[[xPix, yPix]] = wcs.wcs_world2pix([(_ra0, _dec0)], 1)
                if 0 < xPix <= header.get('NAXIS1') and 0 < yPix <= header.get('NAXIS2'):
xmin, xmax = xPix - 300, xPix + 300
ymin, ymax = yPix - 300, yPix + 300
else:
try:
xmin, xmax = 0, header.get('NAXIS1')
ymin, ymax = 0, header.get('NAXIS2')
except:
xmin, xmax = 0, 1000
ymin, ymax = 0, 1000
_sky, _sig = lsc.myloopdef.getsky(X[xmin:xmax, ymin:ymax])
_z1 = _sky - _sig
_z2 = _sky + _sig * 5
plt.clf()
try:
im = plt.imshow(X, cmap='gray', norm=None, aspect=None, interpolation='nearest', alpha=None,
vmin=float(_z1), vmax=float(_z2), origin='upper', extent=None)
except:
im = plt.imshow(X, cmap='gray', norm=None, aspect=None, interpolation='nearest', alpha=None, vmin=0,
vmax=1000, origin='upper', extent=None)
plt.xlim(float(xPix) - 200, float(xPix) + 200)
plt.ylim(float(yPix) + 200, float(yPix) - 200)
plt.plot([float(xPix)], [float(yPix)], marker='o', mfc='none', markersize=25, markeredgewidth=2,
markeredgecolor='r')
if _interactive:
plt.show()
else:
print _output
lsc.delete(_output)
plt.savefig(_output)
else:
print _dir + img, _targetid
print 'SN not found'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
elif status == -5:
print 'status ' + str(status) + ': png already done'
else:
print 'status ' + str(status) + ': unknown status'
def checkfast(imglist, force=True, database='photlco'):
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
print force
for img in imglist:
status = checkstage(img, 'wcs')
if status >= 0 or force == False:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
_dir = ggg[0]['filepath']
iraf.display(_dir + img + '[0]', 1, fill=True, Stdout=1)
###########################################
aa = raw_input('>>>good or bad quality [[g]/b]? ')
if not aa: aa = 'g'
if aa in ['bad', 'b', 'B']:
lsc.mysqldef.updatevalue(database, 'wcs', 9999, os.path.basename(img))
lsc.mysqldef.updatevalue(database, 'psf', 'X', os.path.basename(img))
lsc.mysqldef.updatevalue(database, 'psfmag', 9999, os.path.basename(img))
if os.path.isfile(_dir + img.replace('.fits', '.psf.fits')):
print 'rm ' + _dir + img.replace('.fits', '.psf.fits')
os.system('rm ' + _dir + img.replace('.fits', '.psf.fits'))
if os.path.isfile(_dir + img.replace('.fits', '.sn2.fits')):
print 'rm ' + _dir + img.replace('.fits', '.sn2.fits')
os.system('rm ' + _dir + img.replace('.fits', '.sn2.fits'))
print 'updatestatus bad quality'
lsc.mysqldef.updatevalue(database, 'quality', 1, os.path.basename(img))
else:
print 'updatestatus quality good'
lsc.mysqldef.updatevalue(database, 'quality', 127, os.path.basename(img))
# elif status==0: print 'status '+str(status)+': WCS stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
##################################################################
def checkcosmic(imglist, database='photlco'):
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
for img in imglist:
status = checkstage(img, 'wcs')
if status >= 0:
photlcodict = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img, '*')
_dir = photlcodict[0]['filepath']
origimg = _dir + img
maskimg = origimg.replace('.fits', '.mask.fits')
cleanimg = origimg.replace('.fits', '.clean.fits')
diffimg = origimg.replace('.fits', '.diff.fits')
if os.path.isfile(origimg) and os.path.isfile(maskimg):
print img, photlcodict[0]['filter']
iraf.set(stdimage='imt8192')
iraf.display(origimg + '[0]', 1, fill=True, Stdout=1)
iraf.display(maskimg, 2, zscale=False, fill=True, Stdout=1)
ans = raw_input('>>> good mask [[y]/n] or [b]ad quality? ')
if ans in ['n', 'N', 'No', 'NO', 'bad', 'b', 'B']:
print 'updatestatus bad'
print 'rm', maskimg
os.system('rm ' + maskimg)
print 'rm', cleanimg
os.system('rm ' + cleanimg)
print 'rm', diffimg.replace('.fits', '*')
os.system('rm ' + diffimg.replace('.fits', '*'))
print 'delete', os.path.basename(diffimg), 'from database'
lsc.mysqldef.deleteredufromarchive(os.path.basename(diffimg), 'photlco', 'filename')
if ans in ['bad', 'b', 'B']:
print 'updatestatus bad quality'
lsc.mysqldef.updatevalue(database, 'quality', 1, img)
else:
for f in [origimg, maskimg, cleanimg, diffimg]:
if not os.path.isfile(f): print f, 'not found'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
def display_subtraction(img):
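    """Show the science image, the template, and the difference image side by
    side, zoomed on the central 200x200 pixels, with the photlco magnitudes
    annotated. Returns the three file paths."""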
ggg = lsc.mysqldef.getfromdataraw(conn, 'photlco', 'filename', img, '*')
diffimg = ggg[0]['filepath'] + img
origimg = diffimg.split('.')[0] + '.' + diffimg.split('.')[-1]
tempimg = diffimg.replace('diff', 'ref')
if os.path.isfile(diffimg) and os.path.isfile(origimg) and os.path.isfile(tempimg):
diffdata = fits.getdata(diffimg)
origdata = fits.getdata(origimg)
tempdata = fits.getdata(tempimg)
plt.clf()
ax1 = plt.subplot(2, 2, 1, adjustable='box-forced')
ax2 = plt.subplot(2, 2, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(2, 2, 3, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax1.imshow(origdata, origin='lower', norm=ImageNormalize(origdata, interval=ZScaleInterval()))
ax2.imshow(tempdata, origin='lower', norm=ImageNormalize(tempdata, interval=ZScaleInterval()))
ax3.imshow(diffdata, origin='lower', norm=ImageNormalize(diffdata, interval=ZScaleInterval()))
basename = origimg.split('.')[0]
ax1.set_title(origimg.replace(basename, ''))
ax2.set_title(tempimg.replace(basename, ''))
ax3.set_title(diffimg.replace(basename, ''))
plt.xlim(origdata.shape[0] / 2 - 100, origdata.shape[0] / 2 + 100)
plt.ylim(origdata.shape[1] / 2 - 100, origdata.shape[1] / 2 + 100)
plt.gcf().text(0.75, 0.25,
os.path.basename(basename) + u'\nfilter = {filter}\npsfmag = {psfmag:.2f} \u00b1 {psfdmag:.2f} mag\nmag = {mag:.2f} \u00b1 {dmag:.2f} mag'.format(**ggg[0]),
va='center', ha='center')
plt.tight_layout()
else:
for f in [origimg, tempimg, diffimg]:
if not os.path.isfile(f): print f, 'not found'
return diffimg, origimg, tempimg
def checkdiff(imglist, database='photlco'):
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
iraf.set(stdimage='imt2048')
for img in imglist:
status = checkstage(img, 'wcs')
if status >= 0:
photlcodict = lsc.mysqldef.getfromdataraw(conn, database, 'filename', img, '*')
_dir = photlcodict[0]['filepath']
diffimg = _dir + img
origimg = diffimg.split('.')[0] + '.' + diffimg.split('.')[-1]
tempimg = diffimg.replace('diff', 'ref')
if os.path.isfile(diffimg) and os.path.isfile(origimg) and os.path.isfile(tempimg):
print img, photlcodict[0]['filter']
iraf.display(origimg + '[0]', 1, fill=True, Stdout=1)
iraf.display(tempimg, 2, fill=True, Stdout=1)
iraf.display(diffimg, 3, fill=True, Stdout=1)
ans = raw_input('>>> good difference [[y]/n] or [b]ad quality (original image)? ')
if ans in ['n', 'N', 'No', 'NO', 'bad', 'b', 'B']:
print 'updatestatus bad'
print 'rm', diffimg.replace('.fits', '*')
os.system('rm ' + diffimg.replace('.fits', '*'))
print 'rm', tempimg
os.system('rm ' + tempimg)
print 'delete', img, 'from database'
lsc.mysqldef.deleteredufromarchive(img, 'photlco', 'filename')
if ans in ['bad', 'b', 'B']:
print 'updatestatus bad quality'
lsc.mysqldef.updatevalue(database, 'quality', 1, os.path.basename(origimg))
else:
for f in [origimg, tempimg, diffimg]:
if not os.path.isfile(f): print f, 'not found'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
def display_psf_fit(img, datamax=None):
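    """Show the original (.og.fits) and residual (.rs.fits) cutouts from the
    PSF fit side by side, marking pixels above datamax as saturated.
    Returns the two file paths."""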
ggg = lsc.mysqldef.getfromdataraw(conn, 'photlco', 'filename', img, '*')
ogfile = ggg[0]['filepath'] + img.replace('.fits', '.og.fits')
rsfile = ggg[0]['filepath'] + img.replace('.fits', '.rs.fits')
if os.path.isfile(ogfile) and os.path.isfile(rsfile):
ogdata, hdr = fits.getdata(ogfile, header=True)
rsdata = fits.getdata(rsfile)
if datamax is None:
datamax = lsc.util.readkey3(hdr, 'datamax')
plt.clf()
axL = plt.subplot(1, 2, 1, adjustable='box-forced')
axR = plt.subplot(1, 2, 2, sharex=axL, sharey=axL, adjustable='box-forced')
vmin = np.percentile(ogdata, 5)
vmax = np.percentile(ogdata, 95)
im = axL.imshow(ogdata, vmin=vmin, vmax=vmax, origin='lower')
axR.imshow(rsdata, vmin=vmin, vmax=vmax, origin='lower')
j_sat, i_sat = np.where(ogdata > datamax)
if len(i_sat):
axL.plot(i_sat, j_sat, 'rx', label='{:d} pixels > {:.0f} ADU'.format(len(i_sat), datamax))
axL.legend()
plt.colorbar(im, ax=[axL, axR], orientation='horizontal')
plt.gcf().text(0.5, 0.99, u'{filename}\nfilter = {filter}\npsfmag = {psfmag:.2f} \u00b1 {psfdmag:.2f} mag\nmag = {mag:.2f} \u00b1 {dmag:.2f} mag'.format(**ggg[0]), va='top', ha='center')
return ogfile, rsfile
def checkmag(imglist, datamax=None):
plt.ion()
for img in imglist:
status = checkstage(img, 'checkmag')
if status > 1:
ogfile, rsfile = display_psf_fit(img, datamax)
aa = raw_input('>>>good mag [[y]/n] or [b] bad quality ? ')
if aa in ['n', 'N', 'No', 'NO', 'bad', 'b', 'B']:
print 'update status: bad psfmag & mag'
lsc.mysqldef.query(['update photlco set psfmag=9999, psfdmag=9999, apmag=9999, dapmag=9999, mag=9999, dmag=9999 where filename="{}"'.format(img)], lsc.conn)
os.system('rm -v ' + ogfile)
os.system('rm -v ' + rsfile)
if aa in ['bad', 'b', 'B']:
print 'update status: bad quality'
lsc.mysqldef.updatevalue('photlco', 'quality', 1, img)
elif status == 1:
print 'status ' + str(status) + ': psfmag stage not done'
elif status == 0:
print 'status ' + str(status) + ': WCS stage not done'
elif status == -1:
print 'status ' + str(status) + ': sn2.fits file not found'
elif status == -2:
print 'status ' + str(status) + ': .fits file not found'
elif status == -4:
print 'status ' + str(status) + ': bad quality image'
else:
print 'status ' + str(status) + ': unknown status'
def checkpos(imglist, _ra, _dec, database='photlco'):
imglist2 = []
for img in imglist:
status = checkstage(img, 'checkmag')
if status >= 1:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
_dir = ggg[0]['filepath']
if os.path.isfile(_dir + img.replace('.fits', '.sn2.fits')): imglist2.append(
_dir + img.replace('.fits', '.sn2.fits'))
print imglist2, _ra, _dec
ra, dec = lsc.myloopdef.position(imglist2, _ra, _dec, show=True)
print '######## mean ra and dec position ############'
print 'ra= ' + str(ra)
print 'dec= ' + str(dec)
print '#############'
def checkquality(imglist, database='photlco'):
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
iraf.set(stdimage='imt2048')
for img in imglist:
status = checkstage(img, 'checkquality')
if status == -4:
ggg = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(img), '*')
if not ggg:
status = -3 # not in the redo table
else:
_dir = ggg[0]['filepath']
if os.path.isfile(_dir + img):
iraf.display(_dir + img + '[0]', 1, fill=True, Stdout=1)
aa = raw_input('>>>good image [y/[n]] ? ')
if not aa: aa = 'n'
if aa in ['n', 'N', 'No', 'NO']:
print 'status bad'
else:
print 'updatestatus good'
lsc.mysqldef.updatevalue(database, 'quality', 127, os.path.basename(img))
#lsc.mysqldef.updatevalue('photlco','psfmag',9999,os.path.basename(img))
#if os.path.isfile(_dir+img.replace('.fits', '.psf.fits')):
#print 'rm '+_dir+img.replace('.fits', '.psf.fits')
#os.system('rm '+_dir+img.replace('.fits', '.psf.fits'))
#if os.path.isfile(_dir+img.replace('.fits', '.sn2.fits')):
#print 'rm '+_dir+img.replace('.fits', '.sn2.fits')
#os.system('rm '+_dir+img.replace('.fits', '.sn2.fits'))
#else: print 'status: quality good '
##################################################################
def onkeypress2(event):
global idd, _mjd, _mag, _setup, _filename, shift, _database
xdata, ydata = event.xdata, event.ydata
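    # pick the plotted point nearest (in the mjd-mag plane) to the key press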
dist = np.sqrt((xdata - _mjd) ** 2 + (ydata - _mag) ** 2)
ii = np.argmin(dist)
if ii in idd: idd.remove(ii)
print _filename[ii]
print _mag[ii]
_dir = lsc.mysqldef.getvaluefromarchive(_database, 'filename', _filename[ii], 'filepath')
if 'filepath' in _dir[0]:
_dir = _dir[0]['filepath']
else:
_dir = ''
if _dir:
if os.path.isfile(_dir + _filename[ii].replace('.fits', '.og.fits')) and os.path.isfile(
_dir + _filename[ii].replace('.fits', '.rs.fits')):
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
iraf.display(_dir + _filename[ii].replace('.fits', '.og.fits'), 1, fill=True, Stdout=1)
iraf.display(_dir + _filename[ii].replace('.fits', '.rs.fits'), 2, fill=True, Stdout=1)
if event.key in ['d']:
lsc.mysqldef.updatevalue(_database, 'mag', 9999, _filename[ii])
lsc.mysqldef.updatevalue(_database, 'psfmag', 9999, _filename[ii])
lsc.mysqldef.updatevalue(_database, 'apmag', 9999, _filename[ii])
if _dir:
lsc.util.updateheader(_dir + _filename[ii].replace('.fits', '.sn2.fits'), 0,
{'PSFMAG1': (9999, 'psf magnitude')})
lsc.util.updateheader(_dir + _filename[ii].replace('.fits', '.sn2.fits'), 0,
{'APMAG1': (9999, 'ap magnitude')})
elif event.key in ['u']:
lsc.mysqldef.updatevalue(_database, 'magtype', -1, _filename[ii])
print '\n### set as a limit'
elif event.key in ['b']:
lsc.mysqldef.updatevalue(_database, 'quality', 1, _filename[ii])
print '\n### set bad quality'
print '\n### press:\n d to cancel value,\n c to check one point\n u to set the upper limit\n b to set bad quality.\n Return to exit ...'
nonincl = []
for i in range(len(_mjd)):
if i not in idd: nonincl.append(i)
_symbol = 'sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*'
_color = {'U': 'b', 'B': 'r', 'V': 'g', 'R': 'c', 'I': 'm', 'up': 'b', 'gp': 'r', 'rp': 'g', 'ip': 'c', 'zs': 'm', \
'Bessell-B': 'r', 'Bessell-V': 'g', 'Bessell-R': 'c', 'Bessell-I': 'm', \
'SDSS-G': 'r', 'SDSS-R': 'g', 'SDSS-I': 'c', 'Pan-Starrs-Z': 'm'}
_shift = {'U': -2, 'B': -1, 'V': 0, 'R': 1, 'I': 2, 'up': -2, 'gp': -1, 'rp': 0, 'ip': 1, 'zs': 2, \
'Bessell-B': -1, 'Bessell-V': 0, 'Bessell-R': 1, 'Bessell-I': 2, \
'SDSS-G': -1, 'SDSS-R': 0, 'SDSS-I': 1, 'Pan-Starrs-Z': 2}
ii = 0
mag, mjd = [], []
for _tel in _setup:
shift = 0
for _fil in _setup[_tel]:
shift = _shift[_fil]
col = _color[_fil]
plt.plot(np.array(_setup[_tel][_fil]['mjd']), np.array(_setup[_tel][_fil]['mag']) + shift, _symbol[ii], color=col,
markersize=10)
mag = list(mag) + list(np.array(_setup[_tel][_fil]['mag']) + shift)
mjd = list(mjd) + list(_setup[_tel][_fil]['mjd'])
ii = ii + 1
plt.xlabel('mjd')
plt.ylabel('magnitude')
_mag = mag[:]
_mjd = mjd[:]
_mjd = np.array(_mjd)
_mag = np.array(_mag)
idd = range(len(_mjd))
yticklabels = plt.getp(plt.gca(), 'yticklabels')
xticklabels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(xticklabels, fontsize='10')
plt.setp(yticklabels, fontsize='10')
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., markerscale=.8, numpoints=1)
# plt.legend(numpoints=1,markerscale=.8)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=10)
plt.plot(_mjd, _mag, 'ok', markersize=1)
plt.plot(_mjd[nonincl], _mag[nonincl], 'ow')
##################################################################
class PickablePlot():
    def __init__(self, x, y, mainmenu='', selectedmenu='', hooks=None):
        if hooks is None:
            hooks = {}  # avoid sharing a mutable default argument between instances
        self.x = x
        self.y = y
        self.selectedmenu = selectedmenu
        self.hooks = hooks
plt.ion()
fig = plt.figure()
fig.canvas.mpl_connect('pick_event', self.onclick)
self.i_active = None
self.xdel = np.array([])
self.ydel = np.array([])
axlims = None
while True:
fig.clf()
if 'plot' in hooks:
hooks['plot']()
plt.plot(self.x, self.y, 'k,', picker=5)
plt.plot(self.xdel, self.ydel, 'kx', ms=10)
if axlims is not None:
plt.axis(axlims)
key = raw_input(mainmenu)
if key in hooks and self.i_active is not None:
hooks[key](self.i_active)
if key == '':
break
else:
self.delete_current()
self.i_active = None
plt.draw()
axlims = fig.gca().axis()
def onclick(self, event):
print # to get off the raw_input line
self.i_active = event.ind[0]
if 'click' in self.hooks:
self.hooks['click'](self.i_active)
print self.selectedmenu
def delete_current(self):
if self.i_active is None:
print 'no point selected'
else:
self.xdel = np.append(self.xdel, self.x[self.i_active])
self.ydel = np.append(self.ydel, self.y[self.i_active])
self.x[self.i_active] = np.nan
self.y[self.i_active] = np.nan
def plotfast2(setup):
_symbol = 'sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*'
_color = {'u': '#2c0080', 'g': '#00ccff', 'r': '#ff7d00', 'i': '#90002c', 'z': '#000000',
'U': '#3c0072', 'B': '#0057ff', 'V': '#79ff00', 'R': '#ff7000', 'I': '#80000d'}
_shift = {'U': -3, 'B': -2, 'V': -1, 'R': 0, 'I': 1, 'u': -2, 'g': -1, 'r': 0, 'i': 1, 'z': 2}
filenames = []
mjds = []
mags = []
shifts = []
for _telescope in setup:
for _filter in setup[_telescope]:
filenames += setup[_telescope][_filter]['filename']
mjds += setup[_telescope][_filter]['mjd']
mags += setup[_telescope][_filter]['mag']
shifts += [_shift[lsc.sites.filterst1[_filter]]] * len(setup[_telescope][_filter]['mag'])
def plot_hook():
plt.figure(1)
plt.gca().invert_yaxis()
plt.xlabel('MJD')
plt.ylabel('Magnitude')
for _tel, mark in zip(setup.keys(), _symbol):
for _fil, data_dict in setup[_tel].items():
_fil = lsc.sites.filterst1[_fil]
plt.errorbar(data_dict['mjd'], np.array(data_dict['mag']) + _shift[_fil], data_dict['dmag'],
ls='none', color=_color[_fil], marker=mark, label=_tel+' '+_fil+'{:+.0f}'.format(_shift[_fil]))
plt.legend(loc='best', fontsize='small', numpoints=1)
def click_hook(i):
print filenames[i], 'selected'
print 'mjd = {:.2f}\tmag = {:.2f} ({:+d} shift on plot)'.format(mjds[i], mags[i], shifts[i])
dbrow = lsc.mysqldef.getvaluefromarchive('photlco', 'filename', filenames[i], 'filepath, mjd, mag, filetype')[0]
print 'mjd = {:.2f}\tmag = {:.2f} (from database)'.format(dbrow['mjd'], dbrow['mag'])
plt.figure(2)
display_psf_fit(filenames[i])
if int(dbrow['filetype']) == 3:
plt.figure(3, figsize=(8, 8))
display_subtraction(filenames[i])
def delete_hook(i):
lsc.mysqldef.query(['update photlco set psfmag=9999, psfdmag=9999, apmag=9999, dapmag=9999, mag=9999, dmag=9999 where filename="{}"'.format(filenames[i])], lsc.conn)
_dir = lsc.mysqldef.getvaluefromarchive('photlco', 'filename', filenames[i], 'filepath')[0]['filepath']
if _dir:
lsc.util.updateheader(_dir + filenames[i].replace('.fits', '.sn2.fits'), 0,
{'PSFMAG1': (9999, 'psf magnitude'), 'APMAG1': (9999, 'ap magnitude')})
print 'deleted', filenames[i]
def bad_hook(i):
dbrow = lsc.mysqldef.getvaluefromarchive('photlco', 'filename', filenames[i], 'filepath, filetype')[0]
if int(dbrow['filetype']) == 3:
os.system('rm -v ' + dbrow['filepath'] + filenames[i].replace('.fits', '*'))
os.system('rm -v ' + dbrow['filepath'] + filenames[i].replace('.diff', '.ref'))
lsc.mysqldef.deleteredufromarchive(filenames[i], 'photlco', 'filename')
print 'delete difference image', filenames[i]
else:
lsc.mysqldef.updatevalue('photlco', 'magtype', -1, filenames[i])
print 'marked', filenames[i], 'as bad'
def limit_hook(i):
lsc.mysqldef.updatevalue('photlco', 'quality', 1, filenames[i])
print 'changed', filenames[i], 'to upper limit'
PickablePlot(mjds, np.array(mags) + np.array(shifts),
mainmenu='Click to select a point. Press return to exit.',
selectedmenu='Enter d to delete a point, b to mark an image as bad, or u to set a point as an upper limit.',
hooks={'plot': plot_hook, 'click': click_hook, 'd': delete_hook, 'b': bad_hook, 'u': limit_hook})
##############################################################################
def plotfast(setup, output='', database='photlco'): #,band,color,fissa=''):
global idd, _mjd, _mag, _setup, _filename, shift, _database #,testo,lines,pol,sss,f,fixcol,sigmaa,sigmab,aa,bb
if not output:
plt.ion()
plt.rcParams['figure.figsize'] = 9, 5
fig = plt.figure()
plt.axes([.15, .05, .65, .85])
_symbol = 'sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*sdo+34<>^*'
_color = {'U': 'b', 'B': 'r', 'V': 'g', 'R': 'c', 'I': 'm', 'up': 'b', 'gp': 'r', 'rp': 'g', 'ip': 'c', 'zs': 'm',
'Bessell-B': 'r', 'Bessell-V': 'g', 'Bessell-R': 'c', 'Bessell-I': 'm',
'SDSS-G': 'r', 'SDSS-R': 'g', 'SDSS-I': 'c', 'Pan-Starrs-Z': 'm'}
_shift = {'U': -2, 'B': -1, 'V': 0, 'R': 1, 'I': 2, 'up': -2, 'gp': -1, 'rp': 0, 'ip': 1, 'zs': 2,
'Bessell-B': -1, 'Bessell-V': 0, 'Bessell-R': 1, 'Bessell-I': 2,
'SDSS-G': -1, 'SDSS-R': 0, 'SDSS-I': 1, 'Pan-Starrs-Z': 2}
_setup = setup
_database = database
ii = 0
mag, mjd, filename = [], [], []
for _tel in _setup:
shift = 0
for _fil in _setup[_tel]:
shift = _shift[_fil]
col = _color[_fil]
print _tel, _fil
            jj = np.array(_setup[_tel][_fil]['mjd'])  # np.compress(np.array(_setup[_tel][_fil]['magtype'])>=1, np.array(_setup[_tel][_fil]['jd']))
            mm = np.array(_setup[_tel][_fil]['mag'])  # np.compress(np.array(_setup[_tel][_fil]['magtype'])>=1, np.array(_setup[_tel][_fil]['mag']))
plt.plot(jj, mm + shift, _symbol[ii], color=col, label=_tel + ' ' + _fil + ' ' + str(shift), markersize=10)
jj1 = np.compress(np.array(_setup[_tel][_fil]['magtype']) < 0, np.array(_setup[_tel][_fil]['mjd']))
mm1 = np.compress(np.array(_setup[_tel][_fil]['magtype']) < 0, np.array(_setup[_tel][_fil]['mag']))
if len(mm1) > 0:
plt.errorbar(jj1, mm1, mm1 / 100, lolims=True, fmt='none', ecolor='k')
mag = list(mag) + list(np.array(_setup[_tel][_fil]['mag']) + _shift[_fil])
mjd = list(mjd) + list(_setup[_tel][_fil]['mjd'])
filename = list(filename) + list(_setup[_tel][_fil]['filename'])
ii = ii + 1
plt.xlabel('mjd')
plt.ylabel('magnitude')
plt.xlim(min(mjd) - 5, max(mjd) + 5)
plt.ylim(max(mag) + .5, min(mag) - .5)
yticklabels = plt.getp(plt.gca(), 'yticklabels')
xticklabels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(xticklabels, fontsize='10')
plt.setp(yticklabels, fontsize='10')
# plt.legend(numpoints=1,markerscale=.8)
plt.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0., markerscale=.8, numpoints=1)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=10)
_mag = mag[:]
_mjd = mjd[:]
_filename = filename[:]
_mjd = np.array(_mjd)
_mag = np.array(_mag)
idd = range(len(_mjd))
plt.plot(_mjd, _mag, 'ok', markersize=1)
kid = fig.canvas.mpl_connect('key_press_event', onkeypress2)
# cid = fig.canvas.mpl_connect('button_press_event',onclick2)
if not output:
plt.ion()
plt.draw()
raw_input('press d to mark. Return to exit ...\n')
plt.close()
else:
plt.savefig(output.replace('.txt', '.png'), format='png')
################################################################
def subset(xx, _avg=''):  # list of MJDs
diff = [xx[i + 1] - xx[i] for i in range(len(xx) - 1)]
if _avg:
avg = float(_avg)
else:
avg = sum(diff) / len(diff)
    if avg >= 1 or avg <= 0.1:
        avg = .5
i = 1
subset = {}
position = {}
subset[1] = [xx[0]]
position[1] = [0]
for j in range(0, len(diff)):
if diff[j] > avg: i = i + 1
if i not in subset: subset[i] = []
if i not in position: position[i] = []
subset[i].append(xx[j + 1])
position[i].append(j + 1)
return subset, position
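# Worked example of the grouping above (values are illustrative):
#     >>> subset([100.0, 100.1, 100.2, 103.0, 103.1])
#     ({1: [100.0, 100.1, 100.2], 2: [103.0, 103.1]}, {1: [0, 1, 2], 2: [3, 4]})
# Gaps larger than the mean spacing (0.775 d here) start a new group.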
##########################################################
def process_epoch(epoch):
if epoch is None:
d = datetime.date.today() + datetime.timedelta(1)
g = d - datetime.timedelta(4)
epochs = [g.strftime("%Y%m%d"), d.strftime("%Y%m%d")]
else:
epochs = epoch.split('-')
return epochs
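# Illustrative behavior of the epoch parsing above (dates are made up):
#     process_epoch('20200101-20200131')  ->  ['20200101', '20200131']
#     process_epoch(None)                 ->  [today - 3 days, today + 1 day]
#                                             as YYYYMMDD strings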
def get_list(epoch=None, _telescope='all', _filter='', _bad='', _name='', _id='', _ra='', _dec='', database='photlco',
filetype=1, _groupid=None, _instrument='', _temptel='', _difftype=None, classid=None, _targetid=None):
epochs = process_epoch(epoch)
lista = lsc.mysqldef.getlistfromraw(conn, database, 'dayobs', epochs[0], epochs[-1], '*', _telescope)
if lista:
ll0 = {}
for jj in lista[0].keys(): ll0[jj] = []
for i in range(0, len(lista)):
for jj in lista[0].keys(): ll0[jj].append(lista[i][jj])
inds = np.argsort(ll0['mjd']) # sort by mjd
for i in ll0.keys():
ll0[i] = np.take(ll0[i], inds)
ll0['ra'] = []
ll0['dec'] = []
if 'ra0' not in ll0.keys():
for i in ll0['filename']:
print i
ggg = lsc.mysqldef.getfromdataraw(conn, 'photlcoraw', 'filename', i, '*')
ll0['ra'].append(ggg[0]['ra0'])
ll0['dec'].append(ggg[0]['dec0'])
else:
ll0['ra'] = ll0['ra0']
ll0['dec'] = ll0['dec0']
ll = lsc.myloopdef.filtralist(ll0, _filter, _id, _name, _ra, _dec, _bad, int(filetype), _groupid, _instrument, _temptel, _difftype, classid, _targetid)
else:
ll = ''
return ll
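# Hedged illustration of a typical call (dates are made up):
#     ll = get_list('20200101-20200102', _telescope='all', filetype=1)
# ll is '' when nothing matches; otherwise a dict mapping each photlco column
# (plus 'ra' and 'dec') to an array of values sorted by mjd.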
def get_standards(epoch, name, filters):
epochs = process_epoch(epoch)
targetid = lsc.mysqldef.gettargetid(name, '', '', lsc.conn)
query = '''SELECT DISTINCT std.filepath, std.filename, std.objname, std.filter,
std.wcs, std.psf, std.psfmag, std.zcat, std.mag, std.abscat, std.lastunpacked
FROM
photlco AS obj,
photlco AS std,
targetnames AS targobj,
targets AS targstd,
telescopes AS telobj,
telescopes AS telstd,
instruments AS instobj,
instruments AS inststd
WHERE obj.telescopeid = telobj.id
AND std.telescopeid = telstd.id
AND obj.instrumentid = instobj.id
AND std.instrumentid = inststd.id
AND telobj.shortname = telstd.shortname
AND instobj.type = inststd.type
AND obj.targetid = targobj.targetid
AND std.targetid = targstd.id
AND targstd.classificationid = 1
AND obj.filter = std.filter
AND obj.dayobs = std.dayobs
AND obj.quality = 127
AND std.quality = 127
AND obj.dayobs >= {start}
AND obj.dayobs <= {end}
AND targobj.targetid = {targetid}
'''.format(start=epochs[0], end=epochs[-1], targetid=targetid)
if filters:
query += 'AND (obj.filter="' + '" OR obj.filter="'.join(lsc.sites.filterst[filters]) + '")'
print 'Searching for corresponding standard fields. This may take a minute...'
matching_stds = lsc.mysqldef.query([query], lsc.conn)
if matching_stds:
final_list = {col: [ll[col] for ll in matching_stds] for col in matching_stds[0]}
else:
final_list = {'filepath': [], 'filename': []}
return final_list
######
def check_missing(lista, database='photlco'):
if len(lista) > 0:
for i in lista:
xx = lsc.mysqldef.getfromdataraw(conn, 'photlcoraw', 'filename', str(i), column2='filepath')
yy = lsc.mysqldef.getfromdataraw(conn, database, 'filename', str(i), column2='filepath')
xx, yy = xx[0]['filepath'], yy[0]['filepath']
if not os.path.isfile(yy + i):
os.system('cp ' + xx + i + ' ' + yy + i)
print xx, str(i), yy + i
def checkfilevsdatabase(lista, database='photlco'):
if lista:
if len(lista['filename']) > 0:
for i in range(0, len(lista['filename'])):
                imgsn = lista['filepath'][i] + lista['filename'][i].replace('.fits', '.sn2.fits')
if os.path.isfile(imgsn):
hdr1 = lsc.util.readhdr(imgsn)
_filter = lsc.util.readkey3(hdr1, 'filter')
_exptime = lsc.util.readkey3(hdr1, 'exptime')
_airmass = lsc.util.readkey3(hdr1, 'airmass')
_telescope = lsc.util.readkey3(hdr1, 'telescop')
_psfmag = lsc.util.readkey3(hdr1, 'PSFMAG1')
_psfdmag1 = lsc.util.readkey3(hdr1, 'PSFDMAG1')
_apmag = lsc.util.readkey3(hdr1, 'APMAG1')
_mag = lsc.util.readkey3(hdr1, 'MAG')
if not _mag: # mag
if lista['mag'][i] != 9999.0:
print lista['filename'][i], _mag, lista['mag'][i], 'mag'
lsc.mysqldef.updatevalue(database, 'mag', 9999.0, lista['filename'][i])
else:
if _mag == 9999.0:
if lista['mag'][i] != 9999.0:
print lista['filename'][i], _mag, lista['mag'][i], 'mag'
lsc.mysqldef.updatevalue(database, 'mag', 9999.0, lista['filename'][i])
elif _mag != 9999.0:
if round(lista['mag'][i], 4) != round(float(_mag), 4):
print lista['filename'][i], _mag, lista['mag'][i], 'mag'
lsc.mysqldef.updatevalue(database, 'mag', _mag, lista['filename'][i])
if not _psfmag: # psfmag
if lista['psfmag'][i] != 9999.0:
                        print lista['filename'][i], _psfmag, lista['psfmag'][i], 'psfmag'
lsc.mysqldef.updatevalue(database, 'psfmag', 9999.0, lista['filename'][i])
else:
if _psfmag == 9999.0:
if lista['psfmag'][i] != 9999.0:
print lista['filename'][i], _psfmag, lista['psfmag'][i], 'psfmag'
lsc.mysqldef.updatevalue(database, 'psfmag', 9999.0, lista['filename'][i])
elif _psfmag != 9999.0:
if round(lista['psfmag'][i], 4) != round(float(_psfmag), 4):
print lista['filename'][i], _psfmag, lista['psfmag'][i], 'psfmag'
lsc.mysqldef.updatevalue(database, 'psfmag', _psfmag, lista['filename'][i])
if not _apmag: # apmag
                    if lista['apmag'][i] != 9999.0:
                        print lista['filename'][i], _apmag, lista['apmag'][i], 'apmag'
lsc.mysqldef.updatevalue(database, 'apmag', 9999.0, lista['filename'][i])
else:
if _apmag == 9999.0:
if lista['apmag'][i] != 9999.0:
print lista['filename'][i], _apmag, lista['apmag'][i], 'apmag'
lsc.mysqldef.updatevalue(database, 'apmag', 9999.0, lista['filename'][i])
elif _apmag != 9999.0:
if round(lista['apmag'][i], 4) != round(float(_apmag), 4):
print lista['filename'][i], _apmag, lista['apmag'][i], 'apmag'
lsc.mysqldef.updatevalue(database, 'apmag', _apmag, lista['filename'][i])
#########################################################################################
def run_merge(imglist, _redu=False):
status = []
stat = 'psf'
for img in imglist:
status.append(checkstage(os.path.basename(img), stat))
print imglist
print status
imglist = imglist[np.where(np.array(status) > 0)]
status = np.array(status)[np.where(np.array(status) > 0)]
f = open('_tmp.list', 'w')
for jj in range(0, len(imglist)):
f.write(imglist[jj] + '\n')
f.close()
if _redu:
ii = ' -f '
else:
ii = ''
# if _fix: ff=' -c '
# else: ff=''
# tt=' -t '+_type+' '
command = 'lscmerge.py _tmp.list ' + ii #+tt+ff
print command
os.system(command)
########################################################################################
def run_ingestsloan(imglist, imgtype='sloan', ps1frames='', show=False, force=False):
command = 'lscingestsloan.py ' + ' '.join(imglist)
if imgtype != 'sloan':
command += ' --type ' + imgtype
if ps1frames:
command += ' --ps1frames ' + ps1frames
if show:
command += ' --show'
if force:
command += ' -F'
print command
os.system(command)
#####################################################################
def run_diff(listtar, listtemp, _show=False, _force=False, _normalize='i', _convolve='', _bgo=3, _fixpix=False, _difftype=None, suffix='.diff.fits',
use_mask=True, no_iraf=False, pixstack_limit=None):
status = []
stat = 'psf'
for img in listtar:
status.append(checkstage(os.path.basename(img), stat))
listtar = listtar[np.where(np.array(status) > 0)]
status = np.array(status)[np.where(np.array(status) > 0)]
f = open('_tar.list', 'w')
for jj in range(0, len(listtar)):
f.write(listtar[jj] + '\n')
f.close()
f = open('_temp.list', 'w')
for jj in range(0, len(listtemp)):
f.write(listtemp[jj] + '\n')
f.close()
if _show:
ii = ' --show '
else:
ii = ''
if _force:
ff = ' -f '
else:
ff = ' '
if _convolve:
_convolve = ' --convolve '+_convolve+' '
else:
_convolve=''
if _bgo:
_bgo=' --bgo '+str(_bgo)
else:
_bgo=''
if _fixpix:
fixpix = ' --fixpix '
else:
fixpix = ''
if _difftype is not None:
difftype = ' --difftype ' + str(_difftype)
else:
difftype = ''
if use_mask:
mask = ''
else:
mask = ' --unmask'
if no_iraf:
iraf = ' --no-iraf'
else:
iraf = ''
if pixstack_limit is not None:
pixstack_text = ' --pixstack-limit {}'.format(pixstack_limit)
else:
pixstack_text = ''
command = 'lscdiff.py _tar.list _temp.list ' + ii + ff + '--normalize ' + _normalize + _convolve + _bgo + fixpix + difftype + ' --suffix ' + suffix + mask + iraf + pixstack_text
print command
os.system(command)
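# Hedged illustration: run_diff(np.array(['a.fits']), ['t.fits'], _force=True)
# assembles roughly the following command (defaults assumed, spacing may vary):
#     lscdiff.py _tar.list _temp.list  -f --normalize i --bgo 3 --suffix .diff.fits
# Note that listtar must support boolean/array indexing (e.g. a numpy array).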
######################################################################3
def run_template(listtemp, show=False, _force=False, _interactive=False, _ra=None, _dec=None, _psf=None, _mag=0, _clean=True, _subtract_mag_from_header=False):
status = []
stat = 'psf'
for img in listtemp: status.append(checkstage(os.path.basename(img), stat))
listtemp = listtemp[np.where(np.array(status) > 0)]
status = np.array(status)[np.where(np.array(status) > 0)]
f = open('_temp.list', 'w')
for jj in range(0, len(listtemp)):
f.write(listtemp[jj] + '\n')
f.close()
command = 'lscmaketempl.py _temp.list'
if show:
command += ' --show'
if _force:
command += ' -f'
if _interactive:
command += ' -i'
if _ra:
command += ' -R ' + str(_ra)
if _dec:
command += ' -D ' + str(_dec)
if _psf:
command += ' -p ' + _psf
if _mag:
command += ' --mag ' + str(_mag)
if not _clean:
command += ' --uncleaned'
if _subtract_mag_from_header:
command += ' --subtract-mag-from-header'
print command
os.system(command)
#####################################################################
def getsky(data):
"""
Determine the sky parameters for a FITS data extension.
data -- array holding the image data
"""
    # maximum number of iterations for the mean/std clipping loop
maxiter = 30
# maximum number of data points to sample
maxsample = 10000
# size of the array
ny, nx = data.shape
    # how many samples should we take?
if data.size > maxsample:
nsample = maxsample
else:
nsample = data.size
    # create sample indices
xs = np.random.uniform(low=0, high=nx, size=nsample).astype('L')
ys = np.random.uniform(low=0, high=ny, size=nsample).astype('L')
# sample the data
sample = data[ys, xs].copy()
sample = sample.reshape(nsample)
# determine the clipped mean and standard deviation
mean = sample.mean()
std = sample.std()
oldsize = 0
niter = 0
while oldsize != sample.size and niter < maxiter:
niter += 1
oldsize = sample.size
wok = (sample < mean + 3 * std)
sample = sample[wok]
wok = (sample > mean - 3 * std)
sample = sample[wok]
mean = sample.mean()
std = sample.std()
return mean, std
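# Illustrative use of getsky() on synthetic data (not part of the pipeline):
#     >>> data = np.random.normal(1000., 15., (512, 512))
#     >>> mean, std = getsky(data)
# mean should land near 1000 and std near 15 once the 3-sigma clipping settles.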
###################################################################
def run_cosmic(imglist, database='photlco', _sigclip=4.5, _sigfrac=0.2, _objlim=4, _force=False):
######## SV 20161129 add multiprocess
for ggg in imglist:
_dir,img = os.path.split(ggg)
if _dir:
_dir = _dir+'/'
print _dir + img
if os.path.isfile(_dir + img):
if os.path.isfile(_dir + img.replace('.fits', '.var.fits')):
print 'variance image found'
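                # A variance frame already encodes the bad pixels, so LACosmic
                # is skipped: the image is copied to *.clean.fits and an
                # all-zero uint8 *.mask.fits is written so that downstream
                # steps still find the files they expect.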
os.system('cp '+_dir + img+' '+_dir + img.replace('.fits', '.clean.fits'))
ar, hd = fits.getdata(_dir + img, header=True)
out_fits = fits.PrimaryHDU(header=hd,data=(ar-ar).astype('uint8'))
out_fits.writeto(_dir + img.replace('.fits', '.mask.fits'), overwrite=True, output_verify='fix')
else:
if not os.path.isfile(_dir + img.replace('.fits', '.clean.fits')) or not os.path.isfile(_dir + img.replace('.fits', '.mask.fits')) or _force:
output, mask, satu = lsc.util.Docosmic(_dir + img, _sigclip, _sigfrac, _objlim)
lsc.util.updateheader(output, 0, {'DOCOSMIC': (True, 'Cosmic rejection using LACosmic')})
print 'mv ' + output + ' ' + _dir
os.system('mv ' + output + ' ' + _dir)
os.system('mv ' + mask + ' ' + _dir)
os.system('mv ' + satu + ' ' + _dir)
print output, mask, satu
else:
print 'cosmic rejection already done'
else:
print img, ' not found'
###################################################################
def run_apmag(imglist, database='photlco'):
for img in imglist:
ggg = lsc.mysqldef.getfromdataraw(lsc.conn, database, 'filename', str(img), '*')
if ggg:
_dir = ggg[0]['filepath']
img1 = img.replace('.fits', '.sn2.fits')
print _dir + img1
if os.path.isfile(_dir + img1):
command = 'lscnewcalib.py ' + _dir + img1
print command
os.system(command)
else:
print img1, ' not found'
###################################################################
|
svalenti/lcogtsnpipe
|
trunk/src/lsc/myloopdef.py
|
Python
|
mit
| 97,882
|
[
"Gaussian"
] |
844d29b991ee9c4efb3d647e45f1e76e8bfe48371d46b8c0668a194c7977eff6
|
import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr, cpython_only, start_threads,
temp_dir, requires_type_collecting)
from test.support.script_helper import assert_python_ok, make_script
import sys
import time
import gc
import weakref
try:
import threading
except ImportError:
threading = None
try:
from _testcapi import with_tp_del
except ImportError:
def with_tp_del(cls):
class C(object):
def __new__(cls, *args, **kwargs):
raise TypeError('requires _testcapi.with_tp_del')
return C
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
def __init__(self, i):
self.i = i
self.loop = self
class GC_Detector(object):
# Create an instance I. Then gc hasn't happened again so long as
# I.gc_happened is false.
def __init__(self):
self.gc_happened = False
def it_happened(ignored):
self.gc_happened = True
# Create a piece of cyclic trash that triggers it_happened when
# gc collects it.
self.wr = weakref.ref(C1055820(666), it_happened)
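# Hedged usage sketch (mirrors the polling loops in GCTogglingTests below):
#     detector = GC_Detector()
#     junk = []
#     while not detector.gc_happened:
#         junk.append([])  # keep allocating until a collection happens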
@with_tp_del
class Uncollectable(object):
"""Create a reference cycle with multiple __del__ methods.
An object in a reference cycle will never have zero references,
and so must be garbage collected. If one or more objects in the
cycle have __del__ methods, the gc refuses to guess an order,
and leaves the cycle uncollected."""
def __init__(self, partner=None):
if partner is None:
self.partner = Uncollectable(partner=self)
else:
self.partner = partner
def __tp_del__(self):
pass
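# Hedged behavior sketch (requires _testcapi.with_tp_del): each call
#     Uncollectable()
# builds a two-object cycle whose members define __tp_del__, so after
# gc.collect() both partners (and their instance dicts) end up in
# gc.garbage instead of being freed.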
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@requires_type_collecting
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
        # Tricky: self.__init__ is a bound method; it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
        # Tricky: f -> d -> f; the code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
        # __del__ methods can trigger collection; make sure this happens
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
        # __del__ methods can trigger collection; make sure this happens
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertFalse(gc.is_tracked(UserClassSlots()))
self.assertFalse(gc.is_tracked(UserFloatSlots()))
self.assertFalse(gc.is_tracked(UserIntSlots()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
@requires_type_collecting
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
import weakref
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
import weakref
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
        # Each of these causes four objects to become garbage: two
        # Uncollectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
enabled = gc.isenabled()
gc.disable()
assert not gc.isenabled()
debug = gc.get_debug()
gc.set_debug(debug & ~gc.DEBUG_LEAK) # this test is supposed to leak
try:
gc.collect() # Delete 2nd generation garbage
run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
finally:
gc.set_debug(debug)
# test gc.enable() even if GC is disabled by default
if verbose:
print("restoring automatic collection")
# make sure to always test gc.enable()
gc.enable()
assert gc.isenabled()
if not enabled:
gc.disable()
if __name__ == "__main__":
test_main()
|
batermj/algorithm-challenger
|
code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_gc.py
|
Python
|
apache-2.0
| 34,167
|
[
"VisIt"
] |
8923cb530f154fcb1a53f90168e299d7ce07fef28385c2d5d1e641fe3eb7d9ab
|
# -*- coding: utf-8 -*-
#
# clopath_synapse_small_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Clopath Rule: Bidirectional connections
---------------------------------------
This script simulates a small network of ten excitatory and three
inhibitory ``aeif_psc_delta_clopath`` neurons. The neurons are randomly connected
and driven by 500 Poisson generators. The synapses from the Poisson generators
to the excitatory population and those among the neurons of the network
are Clopath synapses. The rate of the Poisson generators is modulated with
a Gaussian profile whose center shifts randomly every 100 ms between ten
equally spaced positions.
This setup demonstrates that the Clopath synapse is able to establish
bidirectional connections. The example is adapted from [1]_ (cf. fig. 5).
References
~~~~~~~~~~
.. [1] Clopath C, Büsing L, Vasilaki E, Gerstner W (2010). Connectivity reflects coding:
a model of voltage-based STDP with homeostasis.
Nature Neuroscience 13:3, 344--352
"""
import nest
import numpy as np
import matplotlib.pyplot as plt
import random
##############################################################################
# Set the parameters
simulation_time = 1.0e4
resolution = 0.1
delay = resolution
# Poisson_generator parameters
pg_A = 30. # amplitude of Gaussian
pg_sigma = 10. # std deviation
nest.ResetKernel()
nest.resolution = resolution
# Create neurons and devices
nrn_model = 'aeif_psc_delta_clopath'
nrn_params = {'V_m': -30.6,
'g_L': 30.0,
'w': 0.0,
'tau_plus': 7.0,
'tau_minus': 10.0,
'tau_w': 144.0,
'a': 4.0,
'C_m': 281.0,
'Delta_T': 2.0,
'V_peak': 20.0,
't_clamp': 2.0,
'A_LTP': 8.0e-6,
'A_LTD': 14.0e-6,
'A_LTD_const': False,
'b': 0.0805,
'u_ref_squared': 60.0**2}
pop_exc = nest.Create(nrn_model, 10, nrn_params)
pop_inh = nest.Create(nrn_model, 3, nrn_params)
##############################################################################
# We need parrot neurons since Poisson generators can only be connected
# with static connections
pop_input = nest.Create('parrot_neuron', 500) # helper neurons
pg = nest.Create('poisson_generator', 500)
wr = nest.Create('weight_recorder')
##############################################################################
# First connect Poisson generators to helper neurons
nest.Connect(pg, pop_input, 'one_to_one', {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay})
##############################################################################
# Create all the connections
nest.CopyModel('clopath_synapse', 'clopath_input_to_exc',
{'Wmax': 3.0})
conn_dict_input_to_exc = {'rule': 'all_to_all'}
syn_dict_input_to_exc = {'synapse_model': 'clopath_input_to_exc',
'weight': nest.random.uniform(0.5, 2.0),
'delay': delay}
nest.Connect(pop_input, pop_exc, conn_dict_input_to_exc,
syn_dict_input_to_exc)
# Create input->inh connections
conn_dict_input_to_inh = {'rule': 'all_to_all'}
syn_dict_input_to_inh = {'synapse_model': 'static_synapse',
'weight': nest.random.uniform(0.0, 0.5),
'delay': delay}
nest.Connect(pop_input, pop_inh, conn_dict_input_to_inh, syn_dict_input_to_inh)
# Create exc->exc connections
nest.CopyModel('clopath_synapse', 'clopath_exc_to_exc',
{'Wmax': 0.75, 'weight_recorder': wr})
syn_dict_exc_to_exc = {'synapse_model': 'clopath_exc_to_exc', 'weight': 0.25,
'delay': delay}
conn_dict_exc_to_exc = {'rule': 'all_to_all', 'allow_autapses': False}
nest.Connect(pop_exc, pop_exc, conn_dict_exc_to_exc, syn_dict_exc_to_exc)
# Create exc->inh connections
syn_dict_exc_to_inh = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_exc_to_inh = {'rule': 'fixed_indegree', 'indegree': 8}
nest.Connect(pop_exc, pop_inh, conn_dict_exc_to_inh, syn_dict_exc_to_inh)
# Create inh->exc connections
syn_dict_inh_to_exc = {'synapse_model': 'static_synapse',
'weight': 1.0, 'delay': delay}
conn_dict_inh_to_exc = {'rule': 'fixed_outdegree', 'outdegree': 6}
nest.Connect(pop_inh, pop_exc, conn_dict_inh_to_exc, syn_dict_inh_to_exc)
##############################################################################
# Randomize the initial membrane potential
pop_exc.V_m = nest.random.normal(-60., 25.)
pop_inh.V_m = nest.random.normal(-60., 25.)
##############################################################################
# Simulation divided into intervals of 100ms for shifting the Gaussian
sim_interval = 100.
for i in range(int(simulation_time / sim_interval)):
# set rates of poisson generators
rates = np.empty(500)
# pg_mu will be randomly chosen out of 25,75,125,...,425,475
pg_mu = 25 + random.randint(0, 9) * 50
for j in range(500):
rates[j] = pg_A * np.exp((-1 * (j - pg_mu)**2) / (2 * pg_sigma**2))
pg[j].rate = rates[j] * 1.75
nest.Simulate(sim_interval)
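# For orientation: at the Gaussian center (j == pg_mu) the rate peaks at
# pg_A * 1.75 = 52.5 Hz and falls off with a width of pg_sigma = 10 generators.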
##############################################################################
# Plot results
fig, ax = plt.subplots(1, sharex=False)
# Plot synapse weights of the synapses within the excitatory population
# Sort weights according to sender and reshape
exc_conns = nest.GetConnections(pop_exc, pop_exc)
exc_conns_senders = np.array(exc_conns.source)
exc_conns_targets = np.array(exc_conns.target)
exc_conns_weights = np.array(exc_conns.weight)
idx_array = np.argsort(exc_conns_senders)
targets = np.reshape(exc_conns_targets[idx_array], (10, 10 - 1))
weights = np.reshape(exc_conns_weights[idx_array], (10, 10 - 1))
# Sort according to target
for i, (trgs, ws) in enumerate(zip(targets, weights)):
idx_array = np.argsort(trgs)
weights[i] = ws[idx_array]
weight_matrix = np.zeros((10, 10))
tu9 = np.triu_indices_from(weights)
tl9 = np.tril_indices_from(weights, -1)
tu10 = np.triu_indices_from(weight_matrix, 1)
tl10 = np.tril_indices_from(weight_matrix, -1)
weight_matrix[tu10[0], tu10[1]] = weights[tu9[0], tu9[1]]
weight_matrix[tl10[0], tl10[1]] = weights[tl9[0], tl9[1]]
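# Reading of the two blocks above: the (10, 9) weights array has no autapse
# entries, so the triu/tril index shuffling re-inserts an empty diagonal to
# build a full 10x10 connectivity matrix.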
# Difference between initial and final value
init_w_matrix = np.ones((10, 10)) * 0.25
init_w_matrix -= np.identity(10) * 0.25
cax = ax.imshow(weight_matrix - init_w_matrix)
cbarB = fig.colorbar(cax, ax=ax)
ax.set_xticks([0, 2, 4, 6, 8])
ax.set_xticklabels(['1', '3', '5', '7', '9'])
ax.set_yticks([0, 2, 4, 6, 8])
ax.set_yticklabels(['1', '3', '5', '7', '9'])
ax.set_xlabel("to neuron")
ax.set_ylabel("from neuron")
ax.set_title("Change of syn weights before and after simulation")
plt.show()
|
sdiazpier/nest-simulator
|
pynest/examples/clopath_synapse_small_network.py
|
Python
|
gpl-2.0
| 7,481
|
[
"Gaussian",
"NEURON"
] |
3b0878d2f229ced0dd7675cf9b8b1898fc50337fd9361efa2ca70c4431c44914
|
# This file is part of the Fluggo Media Library for high-quality
# video and audio processing.
#
# Copyright 2010-1 Brian J. Crowell <brian@fluggo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import yaml, collections, itertools, functools
from fluggo import ezlist, sortlist, signal
from fluggo.media import process
@functools.total_ordering
class _ZSortKey():
__slots__ = ('item', 'overlaps', 'y', 'z')
    def __init__(self, item, overlaps, y, z):
        self.item = item
        self.overlaps = overlaps
        self.y = y
        self.z = z
def __eq__(self, other):
if other.item in self.item.overlap_items():
if self.z == other.z:
return True
return self.y == other.y
def __lt__(self, other):
if other.item in self.item.overlap_items():
if other.z < self.z:
return True
return other.y < self.y
def __le__(self, other):
if other.item in self.item.overlap_items():
if other.z <= self.z:
return True
return other.y <= self.y
def __str__(self):
return 'key(y={0.y}, z={0.z})'.format(self)
class Anchor:
'''
An anchor on one clip says that its position should be fixed in relation
to another. In the Y direction, the Y offset is kept in memory, but not saved;
each item's position is enough to establish the offset.
In the X (time) direction, if each clip is using a different time scale, the
time offset can be different depending on where each item appears in the
canvas. Therefore we establish a fixed offset here based on :attr:`offset_ns`,
the offset from the beginning of the target clip (not the beginning of the
target's source) to the beginning of the source (anchored) clip.
This won't work out exactly most of the time; the scene will round to the
nearest position in those cases.
    The attribute :attr:`visible` determines whether the anchor warrants displaying
    an explicit link between the clips. If two_way is True, the anchor acts more
    like groups found in other editors: both clips are moved if either one is.
'''
yaml_tag = '!CanvasAnchor'
def __init__(self, target=None, offset_ns=0,
visible=False, two_way=False):
self._target = target
self._offset_ns = int(offset_ns)
self.y_offset = 0.0
self._visible = bool(visible)
self._two_way = bool(two_way)
def _create_repr_dict(self):
result = {
'target': self._target
}
if self._offset_ns:
result['offset_ns'] = self._offset_ns
if self._visible:
result['visible'] = self._visible
if self._two_way:
result['two_way'] = self._two_way
return result
@classmethod
def to_yaml(cls, dumper, data):
return dumper.represent_mapping(cls.yaml_tag, data._create_repr_dict())
@classmethod
def from_yaml(cls, loader, node):
return cls(**loader.construct_mapping(node))
@classmethod
def get_y_position(cls, item):
'''Return the Y position of the item.'''
if isinstance(item, SequenceItem):
return item.sequence.y
else:
return item.y
def get_y_offset(self, source):
'''Return the current offset from *target* to *source*.'''
return Anchor.get_y_position(source) - Anchor.get_y_position(self.target)
def get_desired_x(self, source):
'''Return the desired position for the *source*. The returned position
will be absolute time in source frames.'''
target_rate = self.target.space.rate(self.target.type())
source_rate = source.space.rate(source.type())
# Target time for the item
target_x = process.get_frame_time(target_rate, self.target.abs_x) + self._offset_ns
# get_time_frame floors the result; since we want rounding behavior, we
# add half a frame
target_x += process.get_frame_time(source_rate * 2, 1)
return process.get_time_frame(source_rate, target_x)
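    # Worked sketch with made-up numbers (assuming process.get_frame_time
    # gives the time of N frames in ns and get_time_frame floors): a 24 fps
    # target at abs_x = 48 (2.0 s) with offset_ns = 5e8 asks a 30 fps source
    # to sit at floor((2.5 s + half a source frame) * 30 fps) = frame 75.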
def get_desired_y(self):
return Anchor.get_y_position(self.target) + self.y_offset
def clone(self, target=None):
result = self.__class__(**self._create_repr_dict())
result.y_offset = self.y_offset
if target:
result._target = target
return result
@property
def target(self):
return self._target
@property
def offset_ns(self):
return self._offset_ns
@property
def visible(self):
return self._visible
@property
def two_way(self):
return self._two_way
class Item(object):
'''
Class for all items that can appear in the canvas.
All of the arguments for the constructor are the YAML properties that can appear
for this class.
'''
yaml_tag = '!CanvasItem'
def __init__(self, x=0, y=0.0, length=1, height=1.0, type=None, anchor=None, tags=None,
ease_in=0, ease_out=0, ease_in_type=None, ease_out_type=None, in_motion=False):
self._space = None
self._x = x
self._y = y
self._z = 0
self._height = height
self._length = length
self._type = type
self._ease_in_type = ease_in_type
self._ease_in = ease_in
self._ease_out_type = ease_out_type
self._ease_out = ease_out
self.updated = signal.Signal()
self._anchor = anchor
self._tags = set(tags) if tags else set()
self.in_motion = in_motion
def clone(self):
return self.__class__(**self._create_repr_dict())
def _create_repr_dict(self):
result = {
'x': self._x, 'y': self._y,
'length': self._length, 'height': self._height,
'type': self._type
}
if self._anchor:
result['anchor'] = self._anchor
if self._ease_in:
result['ease_in'] = self._ease_in
if self._ease_in_type:
result['ease_in_type'] = self._ease_in_type
if self._ease_out:
result['ease_out'] = self._ease_out
if self._ease_out_type:
result['ease_out_type'] = self._ease_out_type
if self._tags:
result['tags'] = list(self._tags)
return result
@classmethod
def to_yaml(cls, dumper, data):
return dumper.represent_mapping(cls.yaml_tag, data._create_repr_dict())
@classmethod
def from_yaml(cls, loader, node):
return cls(**loader.construct_mapping(node))
@property
def tags(self):
return frozenset(self._tags)
@property
def x(self):
return self._x
@property
def abs_x(self):
return self._x
@property
def anchor(self):
return self._anchor
@property
def y(self):
return self._y
@property
def z(self):
return self._z
@property
def length(self):
return self._length
@property
def height(self):
return self._height
@property
def space(self):
return self._space
@property
def anchor_target(self):
if self.anchor:
return self.anchor.target
if self.space:
# Check for two-way anchors
for item in self.space.find_immediate_anchored_items(self):
if item.anchor and item.anchor.target == self and item.anchor.two_way:
return item
return None
def z_sort_key(self, y=None, z=None):
'''
Get an object that can be used to sort items in video overlay order. *y* and *z*
alter the key.
'''
return _ZSortKey(self, self.overlap_items(), self._y if y is None else y, self._z if z is None else z)
def overlaps(self, other):
'''
Return True if *other* overlaps this item.
'''
if self.x >= (other.x + other.length) or (self.x + self.length) <= other.x:
return False
if self.y >= (other.y + other.height) or (self.y + self.height) <= other.y:
return False
return True
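    # Overlap is tested on half-open spans [x, x + length) and
    # [y, y + height); items that merely share an edge do not overlap.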
def update(self, **kw):
'''
Update the attributes of this item.
'''
if 'x' in kw:
self._x = int(kw['x'])
if 'length' in kw:
self._length = int(kw['length'])
if 'y' in kw:
self._y = float(kw['y'])
if 'height' in kw:
self._height = float(kw['height'])
if 'z' in kw:
self._z = int(kw['z'])
if 'in_motion' in kw:
self.in_motion = bool(kw['in_motion'])
if 'anchor' in kw:
if self._anchor and self._space:
self._space.remove_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._space.remove_anchor_map(self._anchor.target, self)
self._anchor = kw['anchor']
if self._anchor and self._space:
self._space.add_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._space.add_anchor_map(self._anchor.target, self)
self.updated(**kw)
def overlap_items(self):
'''
Get a list of all items that directly or indirectly overlap this one.
'''
return self._space.find_overlaps_recursive(self)
def kill(self):
if self._anchor:
self._space.remove_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._space.remove_anchor_map(self._anchor.target, self)
self._space = None
def fixup(self):
'''
Perform initialization that has to wait until deserialization is finished.
'''
if self._anchor:
self._space.add_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._space.add_anchor_map(self._anchor.target, self)
self._anchor.y_offset = self._anchor.get_y_offset(self)
def type(self):
'''
The type of the item, such as ``'audio'`` or ``'video'``.
'''
return self._type
def split(self, offset):
'''
Split the item *offset* frames from its start, putting two (new) items in
its place in the scene list.
'''
raise NotImplementedError
def can_join(self, other):
return False
def join(self, other):
raise NotImplementedError
class Clip(Item):
'''
A freestanding video or audio clip.
'''
yaml_tag = '!CanvasClip'
def __init__(self, type=None, offset=0, source=None, **kw):
Item.__init__(self, **kw)
self._type = type
self._source = source
self._offset = offset
def _create_repr_dict(self):
dict = Item._create_repr_dict(self)
dict['offset'] = self._offset
if self._source:
dict['source'] = self._source
return dict
def update(self, **kw):
'''
Update the attributes of this item.
'''
if 'offset' in kw:
self._offset = int(kw['offset'])
if 'source' in kw:
self._source = kw['source']
Item.update(self, **kw)
@property
def source(self):
return self._source
@property
def offset(self):
return self._offset
class PlaceholderItem(Item):
def __init__(self, copy):
Item.__init__(self,
x=copy.x,
y=copy.y,
length=copy.length,
height=copy.height,
type=copy.type())
def _create_repr_dict(self):
raise NotImplementedError
class Sequence(Item, ezlist.EZList):
yaml_tag = '!CanvasSequence'
def __init__(self, type=None, items=None, expanded=False, **kw):
Item.__init__(self, **kw)
ezlist.EZList.__init__(self)
self._type = type
self._items = items if items is not None else []
self._expanded = expanded
        #: Signal with signature item_added(item)
        self.item_added = signal.Signal()
        #: Signal with signature items_removed(start, stop)
        self.items_removed = signal.Signal()
        #: Signal with signature item_updated(item, **kw)
        self.item_updated = signal.Signal()
if items:
self.fixup()
def _create_repr_dict(self):
dict_ = Item._create_repr_dict(self)
dict_['type'] = self._type
dict_['items'] = list(self._items)
dict_['expanded'] = self._expanded
del dict_['length']
return dict_
def type(self):
return self._type
@property
def expanded(self):
return self._expanded
def __getitem__(self, index):
return self._items[index]
def __len__(self):
return len(self._items)
def __iter__(self):
return self._items.__iter__()
def _replace_range(self, start, stop, items):
old_item_set = frozenset(self._items[start:stop])
new_item_set = frozenset(items)
for item in sorted(old_item_set - new_item_set, key=lambda a: -a.index):
self._length -= item.length - item.transition_length
if item.index == 0:
self._length -= item.transition_length
item.kill()
if stop > start:
self._items[start:stop] = []
self._update_marks(start, stop, 0)
# Reset the x values once
x = 0
if start > 0:
prev_item = self._items[start - 1]
x = prev_item._x + prev_item.length
for i, item in enumerate(self._items[start:], start):
item._sequence = self
item._x = x - item.transition_length
x += item.length - item.transition_length
self.items_removed(start, stop)
self._items[start:start] = items
self._update_marks(start, start, len(items))
# Reset the x values again
x = 0
if start > 0:
prev_item = self._items[start - 1]
x = prev_item._x + prev_item.length
for i, item in enumerate(self._items[start:], start):
item._sequence = self
item._x = x - item.transition_length
x += item.length - item.transition_length
item.fixup()
# Send item_added notifications
for item in (new_item_set - old_item_set):
self._length += item.length - item.transition_length
if item.index == 0:
self._length += item.transition_length
self.item_added(item)
# Send x updates
for item in self._items[start:]:
self.item_updated(item, x=item._x)
Item.update(self, length=self._length)
def _move_items(self, start_index, xdiff, lendiff):
if xdiff:
item = self._items[start_index]
item._x += xdiff
self.item_updated(item, x=item._x)
for item in self._items[start_index + 1:]:
item._x += xdiff + lendiff
self.item_updated(item, x=item._x)
self.update(length=self.length + xdiff + lendiff)
def fixup(self):
Item.fixup(self)
self._items = sortlist.AutoIndexList(self._items, index_attr='_index')
# Count up the proper length and set it on the item
        total_length = self[0].transition_length if len(self) else 0
for item in self._items:
item._sequence = self
item._type = self._type
item._x = total_length - item.transition_length
total_length += item.length - item.transition_length
item.fixup()
Item.update(self, length=total_length)
class SequenceItem(object):
yaml_tag = '!CanvasSequenceItem'
def __init__(self, source=None, offset=0, length=1, transition=None,
transition_length=0, type=None, in_motion=False, anchor=None):
if length < 1:
raise ValueError('length cannot be less than 1 ({0} was given)'.format(length))
self._source = source
self._offset = offset
self._length = length
self._transition = transition
self._transition_length = transition_length
self._sequence = None
self._index = None
self._type = type
self._x = 0
self._anchor = anchor
self.in_motion = in_motion
def clone(self):
clone = self.__class__(**self._create_repr_dict())
clone._type = self._type
clone._x = self._x
clone._index = self._index
return clone
def update(self, **kw):
'''
Update the attributes of this item.
'''
xdiff = 0
lendiff = 0
if 'source' in kw:
self._source = kw['source']
if 'offset' in kw:
self._offset = int(kw['offset'])
if 'length' in kw:
new_length = int(kw['length'])
if new_length < 1:
raise ValueError('length cannot be less than 1 ({0} was given)'.format(new_length))
lendiff += new_length - self._length
self._length = new_length
if 'in_motion' in kw:
self.in_motion = bool(kw['in_motion'])
if 'anchor' in kw:
if self._anchor and self._sequence and self._sequence._space:
self._sequence._space.remove_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._sequence._space.remove_anchor_map(self._anchor.target, self)
self._anchor = kw['anchor']
if self._anchor and self._sequence and self._sequence._space:
self._sequence._space.add_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._sequence._space.add_anchor_map(self._anchor.target, self)
if 'transition' in kw:
self._transition = kw['transition']
if 'transition_length' in kw:
new_length = int(kw['transition_length'])
xdiff -= new_length - self._transition_length
self._transition_length = new_length
if self._sequence:
if xdiff or lendiff:
self._sequence._move_items(self._index, xdiff, lendiff)
self._sequence.item_updated(self, **kw)
@property
def source(self):
return self._source
@property
def offset(self):
return self._offset
@property
def length(self):
return self._length
@property
def transition(self):
return self._transition
@property
def anchor(self):
return self._anchor
@property
def transition_length(self):
'''The length of the transition preceding this clip, if any. Zero means a cut, and a
positive number gives the length of the transition. A negative number indicates a gap
between the previous clip and this one. The first clip in a sequence should have a
transition_length of zero.'''
return self._transition_length
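    # Worked example (illustrative, not from the source): for three 100-frame
    # items with transition_lengths of 0, 20, and -10, fixup() computes
    # x positions 0, 80, and 190, and a total sequence length of
    # 100 + (100 - 20) + (100 + 10) = 290 frames.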
@property
def index(self):
return self._index
@property
def sequence(self):
return self._sequence
@property
def x(self):
return self._x
@property
def abs_x(self):
return self._x + self._sequence.x
def type(self):
return self._type
def previous_item(self, skip_in_motion=False):
'''Gets the previous item, or None if there isn't one.
If skip_in_motion is True, skips over in_motion items.'''
item = self
while item.index > 0:
item = item.sequence[item.index - 1]
if skip_in_motion and item.in_motion:
continue
return item
def next_item(self, skip_in_motion=False):
'''Gets the next item, or None if there isn't one.
If skip_in_motion is True, skips over in_motion items.'''
item = self
while item.index < len(item.sequence) - 1:
item = item.sequence[item.index + 1]
if skip_in_motion and item.in_motion:
continue
return item
def _create_repr_dict(self):
mapping = {'source': self._source,
'offset': self._offset, 'length': self._length}
if self._transition_length:
mapping['transition_length'] = self._transition_length
if self._transition:
mapping['transition'] = self._transition
return mapping
@classmethod
def to_yaml(cls, dumper, data):
return dumper.represent_mapping(cls.yaml_tag, data._create_repr_dict())
@classmethod
def from_yaml(cls, loader, node):
return cls(**loader.construct_mapping(node))
def kill(self):
if self._anchor and self._sequence._space:
self._sequence._space.remove_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._sequence._space.remove_anchor_map(self._anchor.target, self)
self._sequence = None
self._index = None
def fixup(self):
if self._anchor and self._sequence._space:
self._sequence._space.add_anchor_map(self, self._anchor.target)
if self._anchor.two_way:
self._sequence._space.add_anchor_map(self._anchor.target, self)
self._anchor.y_offset = self._anchor.get_y_offset(self)
def __str__(self):
return yaml.dump(self)
def _yamlreg(cls):
yaml.add_representer(cls, cls.to_yaml)
yaml.add_constructor(cls.yaml_tag, cls.from_yaml)
_yamlreg(Anchor)
_yamlreg(Item)
_yamlreg(Clip)
_yamlreg(Sequence)
_yamlreg(SequenceItem)
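# Round-trip sketch (hypothetical values; the registrations above make these
# classes YAML-serializable):
#   clip = Clip(x=0, y=0.0, length=100, height=1.0, type='video', offset=25)
#   text = yaml.dump(clip)   # emits a "!CanvasClip {...}" mapping
#   clip2 = yaml.load(text)  # older PyYAML; newer versions need an explicit Loader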
|
fluggo/Canvas
|
fluggo/editor/model/items.py
|
Python
|
gpl-3.0
| 22,350
|
[
"Brian"
] |
2f48d3dcffcd4210a03025d315ae5b21c46b5c6519f1f79216e5776b55b22e12
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
import argparse
import logging
import os.path
import sys
from cclib.parser import ccData
from cclib.io import ccopen
from cclib.io import ccwrite
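# Example invocations (hypothetical log file names; output is written to the
# current directory):
#   python ccwrite.py xyz water.out        -> writes water.xyz
#   python ccwrite.py cjson water.out -t   -> writes water.cjson without indentation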
def main():
parser = argparse.ArgumentParser()
parser.add_argument('outputtype',
choices=('json', 'cjson', 'cml', 'xyz', 'molden', 'wfx'),
help='the output format to write (json/cjson are identical)')
parser.add_argument('compchemlogfile',
nargs='+',
help='one or more computational chemistry output files to parse and convert')
parser.add_argument('-v', '--verbose',
action='store_true',
help='more verbose parsing output (only errors by default)')
parser.add_argument('-g', '--ghost',
type=str,
default=None,
help='Symbol to use for ghost atoms')
parser.add_argument('-t', '--terse',
action='store_true',
                        help='write terse CJSON/JSON output without indentation (the default output is indented for readability)')
parser.add_argument('-u', '--future',
action='store_true',
help='use experimental features (currently optdone_as_list)')
parser.add_argument('-i', '--index',
type=int,
default=None,
help='optional zero-based index for which structure to extract')
args = parser.parse_args()
outputtype = args.outputtype
filenames = args.compchemlogfile
verbose = args.verbose
terse = args.terse
future = args.future
index = args.index
ghost = args.ghost
for filename in filenames:
# We might want to use this option in the near future.
ccopen_kwargs = dict()
if future:
ccopen_kwargs['future'] = True
print("Attempting to parse {}".format(filename))
log = ccopen(filename, **ccopen_kwargs)
if not log:
print("Cannot figure out what type of computational chemistry output file '{}' is.".format(filename))
print("Report this to the cclib development team if you think this is an error.")
sys.exit()
if verbose:
log.logger.setLevel(logging.INFO)
else:
log.logger.setLevel(logging.ERROR)
data = log.parse()
print("cclib can parse the following attributes from {}:".format(filename))
hasattrs = [' {}'.format(attr) for attr in ccData._attrlist if hasattr(data, attr)]
print('\n'.join(hasattrs))
# Write out to disk.
outputdest = '.'.join([os.path.splitext(os.path.basename(filename))[0], outputtype])
ccwrite_kwargs = dict()
if future:
ccwrite_kwargs['future'] = True
if ghost:
ccwrite_kwargs['ghost'] = ghost
        # For XYZ files, write the last geometry unless otherwise
        # specified; index may legitimately be 0, so compare against None.
        if index is None:
            index = -1
ccwrite_kwargs['jobfilename'] = filename
# The argument terse presently is only applicable to
# CJSON/JSON formats
ccwrite(data, outputtype, outputdest, indices=index, terse=terse,
**ccwrite_kwargs)
if __name__ == "__main__":
main()
|
berquist/cclib
|
cclib/scripts/ccwrite.py
|
Python
|
bsd-3-clause
| 3,590
|
[
"cclib"
] |
be262ce75ac24ba455d4d604a8936b72b8e5d2bd68ebf11aa9e95d8b741ffe75
|
#!/usr/bin/env python
"""
obsgen.py
State Estimation and Analysis for PYthon
Module to process observations:
obsgen : class to convert from raw to ROMS observations using
specific subclasses
Written by Brian Powell on 08/15/15
Copyright (c)2017 University of Hawaii under the BSD-License.
"""
import numpy as np
import netCDF4
import seapy
import datetime
from warnings import warn
def error_profile(obs, depth, error, provenance=None):
"""
Apply a vertical error profile to a given observation structure.
This allows for error minimums to vary by depth and observation
type.
Parameters
----------
obs : seapy.roms.obs.obs or string,
The observations to enforce the error profile upon.
depth : ndarray,
Array of depths for the errors provided
error : dict,
Dictionary of the errors, where the key is the type of observation
(as defined by seapy.roms.obs.obs_types) and the value is
an ndarray of same length as depth with the error [in squared units]
of the observation profile.
provenance : list of int or string, optional,
The provenance to apply the errors to (ignore other observations
of the same type, but different instrument)
Returns
-------
None:
        The obs structure is mutable and is changed in place
Examples
--------
>>> obs = obs('observation_file.nc')
>>> depth = [10, 30, 50, 1000, 2000]
>>> error['temp'] = [0.5, 0.2, 0.4, 0.1, 0.01]
>>> error_profile(obs, depth, error)
The resulting 'obs' class will have had its error profiles
modified.
"""
from scipy.interpolate import interp1d
obs = seapy.roms.obs.asobs(obs)
depth = np.atleast_1d(depth).flatten()
depth = np.abs(depth)
pro = seapy.roms.obs.asprovenance(provenance) if provenance else None
# Loop over all of the profiles in the error dictionary and
# apply them to the observations
for var in error:
typ = seapy.roms.obs.astype(var)
try:
fint = interp1d(depth, error[var].flatten(), copy=False)
            if pro is not None and pro.any():
l = np.where(np.logical_and(obs.type == typ,
np.in1d(obs.provenance, pro)))
else:
l = np.where(np.logical_and(obs.type == typ, obs.depth < 0))
nerr = fint(np.abs(obs.depth[l]))
obs.error[l] = np.maximum(obs.error[l], nerr)
except ValueError:
warn("Error for {:s} is the wrong size".format(var))
continue
pass
def add_ssh_tides(obs, tide_file, tide_error, tide_start=None, provenance=None,
reftime=seapy.default_epoch):
"""
Apply predicted barotropic tides to the SSH values of given observations
using the tide_file given.
Parameters
----------
obs : seapy.roms.obs.obs or string,
The observations to enforce the error profile upon.
tide_file : string,
The name of the ROMS tidal forcing file to use for predicting the
barotropic tides.
tide_error : np.masked_array
A two dimensional array of the tidal fit errors to apply to
the ssh errors when adding the tides. This should be the same size
as the rho-grid. The units of the error must be in meters. If it is
masked, the mask will be honored and obs that are in the mask will
be removed. This allows you to filter on regions of high error.
    tide_start : datetime, optional,
        The tide_start time of the tidal forcing. If not specified,
        it is read from the attributes of the tidal forcing file
provenance : list of int or string, optional,
The provenance to apply the tides to (ignore other observations
of the same type, but different instrument)
reftime: datetime,
Reference time for the observation times
Returns
-------
None:
        The obs structure is mutable and is changed in place
Examples
--------
>>> obs = obs('observation_file.nc')
>>> add_ssh_tides(obs, 'tide_frc.nc', errmap)
The resulting 'obs' variable will have modified data. To save it:
>>> obs.to_netcdf()
"""
# Load tidal file data
frc = seapy.roms.tide.load_forcing(tide_file)
if not tide_start:
tide_start = frc['tide_start']
# Make sure that the sizes are the same
if frc['Eamp'].shape[1:] != tide_error.shape:
raise ValueError(
"The error array is not the same size as the tidal grid")
# Gather the observations that need tidal information
obs = seapy.roms.obs.asobs(obs)
pro = seapy.roms.obs.asprovenance(provenance) if provenance else None
    if pro is not None:
l = np.where(np.logical_and(obs.type == 1,
np.in1d(obs.provenance, pro)))
else:
l = np.where(obs.type == 1)
# If we have any, then do tidal predictions and add the signal
# and error to the observations
bad = []
if l[0].any():
ox = np.rint(obs.x[l]).astype(int)
oy = np.rint(obs.y[l]).astype(int)
idx = seapy.unique_rows((ox, oy))
for cur in seapy.progressbar.progress(idx):
pts = np.where(np.logical_and(ox == ox[cur], oy == oy[cur]))
# If this point is masked, remove from the observations
if not tide_error[oy[cur], ox[cur]]:
bad.append(l[0][pts].tolist())
else:
time = [reftime + datetime.timedelta(t) for t in
obs.time[l][pts]]
amppha = seapy.tide.pack_amp_phase(
frc['tides'], frc['Eamp'][:, oy[cur], ox[cur]],
frc['Ephase'][:, oy[cur], ox[cur]])
zpred = seapy.tide.predict(time, amppha,
lat=obs.lat[l][cur],
tide_start=tide_start)
# Add the information to the observations
obs.value[l[0][pts]] += zpred
obs.error[l[0][pts]] = np.maximum(
obs.error[l[0][pts]], tide_error[oy[cur], ox[cur]]**2)
# If any were bad, then remove them
if bad:
obs.delete(seapy.flatten(bad))
pass
class obsgen(object):
def __init__(self, grid, dt, reftime=seapy.default_epoch):
"""
class for abstracting the processing of raw observation files
(satellite, in situ, etc.) into ROMS observations files. All
processing has commonalities which this class encapsulates, while
leaving the loading and translation of individual data formats
to subclasses.
Parameters
----------
grid: seapy.model.grid or string,
grid to use for generating observations
dt: float,
Model time-step or greater in units of days
epoch: datetime, optional,
Time to reference all observations from
Returns
-------
None
"""
self.grid = seapy.model.asgrid(grid)
self.dt = dt
self.epoch = reftime
def convert_file(self, file, title=None):
"""
convert a raw observation file into a ROMS observations structure.
The subclasses are responsible for the conversion, and this method
        in obsgen is only a stub.
Parameters
----------
file : string,
filename of the file to process
title : string,
Title to give the new observation structure global attribute
Returns
-------
seapy.roms.obs.obs,
observation structure from raw obs
"""
pass
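    # Skeletal override pattern (illustrative; the subclasses below follow it):
    #   class my_sensor(obsgen):
    #       def convert_file(self, file, title="My Obs"):
    #           time, lon, lat, depth, data = ...  # load and QC the raw file
    #           return seapy.roms.obs.gridder(self.grid, time, lon, lat,
    #                                         depth, data, self.dt, title)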
def batch_files(self, in_files, out_files, clobber=True):
"""
Given a list of input files, process each one and save each result
into the given output file.
Parameters
----------
in_files : list of strings,
filenames of the files to process
out_files : list of strings,
filenames of the files to create for each of the input filenames.
If a single string is given, the character '#' will be replaced
by the starting time of the observation (e.g. out_files="out_#.nc"
will become out_03234.nc)
clobber : bool, optional
If TRUE, overwrite any existing output files. If False, the
file is not processed.
Returns
-------
None
"""
import re
import os
outtime = False
if isinstance(out_files, str):
outtime = True
        time = re.compile(r'\#')
for n, file in enumerate(in_files):
try:
print(file)
obs = self.convert_file(file)
if obs is None:
continue
if outtime:
ofile = time.sub("{:05d}".format(int(obs.time[0])),
out_files)
else:
ofile = out_files[n]
if clobber:
obs.to_netcdf(ofile, True)
else:
for i in "abcdefgh":
if os.path.isfile(ofile):
ofile = re.sub("[a-h]{0,1}\.nc", i + ".nc", ofile)
else:
break
obs.to_netcdf(ofile, False)
except (BaseException, UserWarning) as e:
warn("WARNING: {:s} cannot be processed.\nError: {:}".format(
file, e.args))
pass
##############################################################################
#
# REMOTE-SENSING DATA
#
##############################################################################
class aquarius_sss(obsgen):
"""
class to process Aquarius SSS HDF5 files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, salt_limits=None,
salt_error=0.2):
if salt_limits is None:
self.salt_limits = (10, 36)
else:
self.salt_limits = salt_limits
self.salt_error = salt_error
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="AQUARIUS Obs"):
"""
Load an Aquarius file and convert into an obs structure
"""
import h5py
f = h5py.File(file, 'r')
salt = np.ma.masked_equal(np.flipud(f['l3m_data'][:]),
f['l3m_data'].attrs['_FillValue'])
year = f.attrs['Period End Year']
day = f.attrs['Period End Day']
nlat = f.attrs['Northernmost Latitude'] - 0.5
slat = f.attrs['Southernmost Latitude'] + 0.5
wlon = f.attrs['Westernmost Longitude'] + 0.5
elon = f.attrs['Easternmost Longitude'] - 0.5
dlat = f.attrs['Latitude Step']
dlon = f.attrs['Longitude Step']
f.close()
[lon, lat] = np.meshgrid(np.arange(wlon, elon + dlon, dlon),
np.arange(slat, nlat + dlat, dlat))
time = (datetime.datetime(year, 1, 1) + datetime.timedelta(int(day)) -
self.epoch).days
lat = lat.flatten()
lon = lon.flatten()
if self.grid.east():
lon[lon < 0] += 360
salt = np.ma.masked_outside(salt.flatten(), self.salt_limits[0],
self.salt_limits[1])
data = [seapy.roms.obs.raw_data("SALT", "SSS_AQUARIUS",
salt, None, self.salt_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
pass
class aviso_sla_map(obsgen):
"""
class to process AVISO SLA map netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, ssh_mean=None,
ssh_error=0.05):
if ssh_mean is not None:
self.ssh_mean = seapy.convolve_mask(ssh_mean, ksize=5, copy=True)
else:
self.ssh_mean = None
self.ssh_error = ssh_error
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="AVISO Obs"):
"""
Load an AVISO file and convert into an obs structure
"""
# Load AVISO Data
nc = seapy.netcdf(file)
lonname = 'lon' if 'lon' in nc.variables.keys() else 'longitude'
lon = nc.variables[lonname][:]
latname = 'lat' if 'lat' in nc.variables.keys() else 'latitude'
lat = nc.variables[latname][:]
dat = np.squeeze(nc.variables["sla"][:])
err = np.squeeze(nc.variables["err"][:])
time = netCDF4.num2date(nc.variables["time"][0],
nc.variables["time"].units) - self.epoch
time = time.total_seconds() * seapy.secs2day
nc.close()
lon, lat = np.meshgrid(lon, lat)
lat = lat.flatten()
lon = lon.flatten()
if not self.grid.east():
lon[lon > 180] -= 360
data = [seapy.roms.obs.raw_data("ZETA", "SSH_AVISO_MAP",
dat.flatten(), err.flatten(), self.ssh_error)]
# Grid it
obs = seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
# Apply the model mean ssh to the sla data
if self.ssh_mean is not None:
m, p = seapy.oasurf(self.grid.I, self.grid.J, self.ssh_mean,
obs.x, obs.y, nx=1, ny=1, weight=7)
obs.value += m
return obs
_aviso_sla_errors = {
"SSH_AVISO_ENVISAT": 0.06,
"SSH_AVISO_JASON1": 0.05,
"SSH_AVISO_JASON2": 0.05,
"SSH_AVISO_JASON3": 0.05,
"SSH_AVISO_GFO": 0.05,
"SSH_AVISO_ALTIKA": 0.07,
"SSH_AVISO_CRYOSAT2": 0.07,
"SSH_AVISO_HAIYANG": 0.07,
"SSH_AVISO_ERS1": 0.06,
"SSH_AVISO_ERS2": 0.06,
"SSH_AVISO_TOPEX_POSEIDON": 0.05,
"SSH_AVISO_SENTINEL3A": 0.05
}
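# Note: these per-instrument minimum SLA errors are assumed to be in meters,
# consistent with the ssh_error defaults used elsewhere in this module.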
class aviso_sla_track(obsgen):
"""
class to process AVISO SLA track netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data. THIS COVERS ALL SATELLITES/INSTRUMENTS FROM AVISO TRACK:
al, c2, e1, e2, en, enn, g2, h2, j1, j1g, j1n, j2, tp and tpn.
Parameters
----------
ssh_mean : ndarray,
Spatial map of rho-grid shape that contains the model mean SSH
ssh_error: dict, optional
Dictionary of the minimum errors for each satellite. The default
uses the errors defined in _aviso_sla_errors
repeat: int
Number of hours to repeat the track before and after its initial
pass
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, ssh_mean=None,
ssh_error=None, repeat=3, provenance="SSH"):
self.provenance = provenance.upper()
self.repeat = repeat
self.ssh_error = ssh_error if ssh_error else _aviso_sla_errors
if ssh_mean is not None:
self.ssh_mean = seapy.convolve_mask(ssh_mean, ksize=5, copy=True)
else:
self.ssh_mean = None
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="AVISO SLA Track Obs"):
"""
Load an AVISO file and convert into an obs structure
"""
# Load AVISO Data
nc = seapy.netcdf(file)
lon = nc.variables["longitude"][:]
lat = nc.variables["latitude"][:]
slaname = 'SLA' if 'SLA' in nc.variables.keys() else 'sla_filtered'
dat = nc.variables[slaname][:]
time = seapy.roms.get_time(nc, "time", epoch=self.epoch)
nc.close()
# make them into vectors
lat = lat.ravel()
lon = lon.ravel()
dat = dat.ravel()
err = np.ones(dat.shape) * _aviso_sla_errors.get(self.provenance, 0.1)
if not self.grid.east():
lon[lon > 180] -= 360
good = dat.nonzero()
data = [seapy.roms.obs.raw_data("ZETA", self.provenance,
dat[good], err[good], err[0])]
# Grid it
obs = seapy.roms.obs.gridder(self.grid, time, lon[good], lat[good], None,
data, self.dt, title)
# Apply the model mean ssh to the sla data
if self.ssh_mean is not None and obs is not None:
m, p = seapy.oasurf(self.grid.I, self.grid.J, self.ssh_mean,
obs.x, obs.y, nx=1, ny=1, weight=7)
obs.value += m
# Duplicate the observations before and after as per the repeat
# time unless it is zero
if self.repeat and obs:
prior = obs.copy()
after = obs.copy()
prior.time -= self.repeat / 24
after.time += self.repeat / 24
obs.add(prior)
obs.add(after)
return obs
class ostia_sst_map(obsgen):
"""
class to process OSTIA SST map netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_error=0.4,
temp_limits=None):
self.temp_error = temp_error
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="OSTIA SST Obs"):
"""
Load an OSTIA file and convert into an obs structure
"""
# Load OSTIA Data
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(
nc.variables["analysed_sst"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(np.squeeze(
nc.variables["analysis_error"][:]), 0.01, 2.0)
dat[err.mask] = np.ma.masked
time = netCDF4.num2date(nc.variables["time"][0],
nc.variables["time"].units) - self.epoch
time = time.total_seconds() * seapy.secs2day
nc.close()
if self.grid.east():
lon[lon < 0] += 360
lon, lat = np.meshgrid(lon, lat)
good = dat.nonzero()
lat = lat[good]
lon = lon[good]
data = [seapy.roms.obs.raw_data("TEMP", "SST_OSTIA", dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
class navo_sst_map(obsgen):
"""
class to process NAVO SST map netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, depth=None, reftime=seapy.default_epoch, temp_error=0.25,
temp_limits=None, provenance="SST_NAVO_MAP"):
self.temp_error = temp_error
self.provenance = provenance.upper()
self.temp_limits = (2, 35) if temp_limits is None else temp_limits
self.depth = 4 if depth is None else np.abs(depth)
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="NAVO SST Obs"):
"""
Load a NAVO map file and convert into an obs structure
"""
import re
import sys
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(nc.variables["analysed_sst"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.array(np.squeeze(
nc.variables["analysis_error"][:]), mask=dat.mask)
        # This is an analyzed product and provides errors as a function
        # of space and time directly. The temperature is the bulk
        # temperature (i.e., at around 4m depth, below the e-folding depths
        # of sunlight in the ocean), so the product does not have a diurnal
        # cycle (i.e., you don't have to worry about hourly variations).
time = netCDF4.num2date(nc.variables["time"][0],
nc.variables["time"].units) - self.epoch
time = time.total_seconds() * seapy.secs2day
nc.close()
# here we set the depth to be 4 m below the surface
if self.grid.east():
lon[lon < 0] += 360
lon, lat = np.meshgrid(lon, lat)
good = dat.nonzero()
lat = lat[good]
lon = lon[good]
data = [seapy.roms.obs.raw_data("TEMP", self.provenance, dat.compressed(),
err[good], self.temp_error)]
# Grid it
obs = seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, depth_adjust=True, title=title)
obs.z *= 0
obs.depth = -self.depth * np.ones(len(obs.depth))
return obs
class modis_sst_map(obsgen):
"""
class to process MODIS SST map netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_error=0.5,
temp_limits=None, provenance="SST_MODIS_AQUA"):
self.temp_error = temp_error
self.provenance = provenance.upper()
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="MODIS SST Obs"):
"""
Load an MODIS file and convert into an obs structure
"""
# Load MODIS Data
import re
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(nc.variables["sst"][:],
self.temp_limits[0], self.temp_limits[1])
err = np.ones(dat.shape) * self.temp_error
time = seapy.date2day(datetime.datetime.strptime(
            re.sub(r'\.[0-9]+Z$', '', nc.time_coverage_end),
"%Y-%m-%dT%H:%M:%S"), self.epoch)
# Check the data flags
flags = np.ma.masked_not_equal(nc.variables["qual_sst"][:], 0)
dat[flags.mask] = np.ma.masked
nc.close()
if self.grid.east():
lon[lon < 0] += 360
lon, lat = np.meshgrid(lon, lat)
good = dat.nonzero()
lat = lat[good]
lon = lon[good]
data = [seapy.roms.obs.raw_data("TEMP", self.provenance, dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time, lon, lat, None,
data, self.dt, title)
class remss_swath(obsgen):
"""
class to process REMSS SST swath netcdf files into ROMS observation
files. The files may be AMSRE, TMI, etc. This is a subclass of
    seapy.roms.obsgen.obsgen, and handles the loading of the data.
"""
def __init__(self, grid, dt, check_qc_flags=True, reftime=seapy.default_epoch, temp_error=0.4,
temp_limits=None, provenance="SST_REMSS"):
self.temp_error = temp_error
self.provenance = provenance.upper()
self.check_qc_flags = check_qc_flags
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="REMSS SST Obs"):
"""
Load an REMSS file and convert into an obs structure
"""
# Load REMSS Data
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(
nc.variables["sea_surface_temperature"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(np.squeeze(
nc.variables["sses_standard_deviation"][:]), 0.01, 2.0)
dat[err.mask] = np.ma.masked
# Check the data flags
if self.check_qc_flags:
flags = np.ma.masked_not_equal(
np.squeeze(nc.variables["quality_level"][:]), 5)
dat[flags.mask] = np.ma.masked
else:
dat = np.ma.masked_where(
np.squeeze(nc.variables["quality_level"][:]).data == 1, dat)
# Grab the observation time
time = netCDF4.num2date(nc.variables["time"][0],
nc.variables["time"].units) - self.epoch
dtime = nc.variables["sst_dtime"][:]
time = np.squeeze((time.total_seconds() + dtime) * seapy.secs2day)
nc.close()
if self.grid.east():
lon[lon < 0] += 360
good = dat.nonzero()
data = [seapy.roms.obs.raw_data("TEMP", self.provenance,
dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time[good], lon[good], lat[good],
None, data, self.dt, title)
class remss_map(obsgen):
"""
class to process REMSS SST map netcdf files into ROMS observation
files. The files may be AMSRE, TMI, etc. This is a subclass of
    seapy.roms.obsgen.obsgen, and handles the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_error=0.4,
temp_limits=None, provenance="SST_REMSS"):
self.temp_error = temp_error
self.provenance = provenance.upper()
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="REMSS SST Obs"):
"""
Load an REMSS file and convert into an obs structure
"""
# Load REMSS Data
nc = seapy.netcdf(file)
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(np.squeeze(
nc.variables["sea_surface_temperature"][:]) - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(np.squeeze(
nc.variables["SSES_standard_deviation_error"][:]), 0.01, 2.0)
dat[err.mask] = np.ma.masked
# Check the data flags
flags = np.ma.masked_not_equal(
np.squeeze(nc.variables["rejection_flag"][:]), 0)
dat[flags.mask] = np.ma.masked
err[flags.mask] = np.ma.masked
# Grab the observation time
time = netCDF4.num2date(nc.variables["time"][:],
nc.variables["time"].units)
time = np.array([(t - self.epoch).total_seconds() * seapy.secs2day
for t in time])
sst_time = nc.variables["sst_dtime"][:] * seapy.secs2day
for n, i in enumerate(time):
sst_time[n, :, :] += i
sst_time[dat.mask] = np.ma.masked
# Set up the coordinate
lon, lat = np.meshgrid(lon, lat)
lon = np.ma.masked_where(dat.mask, seapy.adddim(lon, len(time)))
lat = np.ma.masked_where(dat.mask, seapy.adddim(lat, len(time)))
nc.close()
if self.grid.east():
lon[lon < 0] += 360
data = [seapy.roms.obs.raw_data("TEMP", self.provenance,
dat.compressed(),
err.compressed(), self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, sst_time.compressed(),
                                      lon.compressed(), lat.compressed(), None,
data, self.dt, title)
class viirs_swath(obsgen):
"""
class to process VIIRS SST swath netcdf files into ROMS observation
files. This is a subclass of
seapy.roms.obsgen.obsgen, and handles the loading of the data.
"""
def __init__(self, grid, dt, check_qc_flags=True, reftime=seapy.default_epoch,
temp_error=0.4, temp_limits=None, provenance="SST_VIIRS"):
self.temp_error = temp_error
self.provenance = provenance.upper()
self.check_qc_flags = check_qc_flags
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="VIIRS SST Obs"):
"""
Load a VIIRS file and convert into an obs structure
"""
# Load VIIRS Data
nc = seapy.netcdf(file, aggdim="time")
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
dat = np.ma.masked_outside(
nc.variables["sea_surface_temperature"][:] - 273.15,
self.temp_limits[0], self.temp_limits[1])
err = np.ma.masked_outside(
nc.variables["sses_standard_deviation"][:], 0.01, 2.0)
dat[err.mask] = np.ma.masked
# Check the data flags
if self.check_qc_flags:
flags = np.ma.masked_not_equal(
nc.variables["quality_level"][:], 5)
dat[flags.mask] = np.ma.masked
else:
dat = np.ma.masked_where(
nc.variables["quality_level"][:].data == 1, dat)
# Grab the observation time
time = netCDF4.num2date(nc.variables["time"][:],
nc.variables["time"].units) - self.epoch
time = np.asarray([x.total_seconds() for x in time])[:,np.newaxis,np.newaxis]
dtime = nc.variables["sst_dtime"][:]
time = (time + dtime) * seapy.secs2day
nc.close()
# Set up the coordinate
lon = np.ma.masked_where(dat.mask, seapy.adddim(lon, len(time)))
lat = np.ma.masked_where(dat.mask, seapy.adddim(lat, len(time)))
if self.grid.east():
lon[lon < 0] += 360
good = dat.nonzero()
data = [seapy.roms.obs.raw_data("TEMP", self.provenance,
dat.compressed(),
err[good], self.temp_error)]
# Grid it
return seapy.roms.obs.gridder(self.grid, time[good], lon[good], lat[good],
None, data, self.dt, title)
##############################################################################
#
# IN SITU DATA
#
##############################################################################
class seaglider_profile(obsgen):
"""
class to process SeaGlider .pro files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, dtype=None, temp_limits=None,
salt_limits=None, depth_limit=-15, temp_error=0.2,
salt_error=0.05):
if temp_limits is None:
self.temp_limits = (5, 30)
else:
self.temp_limits = temp_limits
if salt_limits is None:
self.salt_limits = (31, 35.5)
else:
self.salt_limits = salt_limits
if dtype is None:
self.dtype = {'names': ('time', 'pres', 'depth', 'temp', 'cond',
'salt', 'sigma', 'lat', 'lon'),
'formats': ['f4'] * 9}
else:
self.dtype = dtype
self.depth_limit = depth_limit
self.temp_error = temp_error
self.salt_error = salt_error
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="SeaGlider Obs"):
"""
Load a SeaGlider .pro file and convert into an obs structure
"""
import re
# Load the text file. All data goes into the pro dictionary
# as defined by dtype. The header information needs to be parsed
with open(file) as myfile:
header = [myfile.readline() for i in range(19)]
pro = np.loadtxt(myfile, self.dtype, delimiter=',', comments='%')
# Parse the header information
        parser = re.compile(r'^%(\w+): (.*)$')
params = {}
for line in header:
try:
opt = parser.findall(line)
params[opt[0][0]] = opt[0][1]
except:
pass
# Determine the needed information from the headers
glider_name = "GLIDER" if params.get("glider", None) is None else \
"GLIDER_SG" + params["glider"]
provenance = seapy.roms.obs.asprovenance(glider_name)
try:
            date = [int(s) for s in re.findall(r'([\d]{2})\s', params["start"])]
start_time = datetime.datetime.strptime(params["start"].strip(),
"%m %d 1%y %H %M %S")
dtime = (start_time - self.epoch).total_seconds() / 86400
except:
raise ValueError("date format incorrect in file: " + file)
# Make sure that the GPS fix isn't screwy
if self.grid.east():
pro["lon"][pro["lon"] < 0] += 360
dist = seapy.earth_distance(pro["lon"][0], pro["lat"][0],
pro["lon"][-1], pro["lat"][-1])
velocity = dist / pro["time"][-1]
if velocity > 2:
warn("WARNING: GPS fix is incorrect for " + file)
return None
# Build the data with masked entries
temp = np.ma.masked_outside(pro["temp"], self.temp_limits[0],
self.temp_limits[1])
salt = np.ma.masked_outside(pro["salt"], self.salt_limits[0],
self.salt_limits[1])
depth = np.ma.masked_greater(-pro["depth"], self.depth_limit)
good = ~np.ma.getmaskarray(depth)
# Grid it
data = [seapy.roms.obs.raw_data("TEMP", provenance, temp[good],
None, self.temp_error),
seapy.roms.obs.raw_data("SALT", provenance, salt[good],
None, self.salt_error)]
return seapy.roms.obs.gridder(self.grid, pro["time"][good] / 86400 + dtime,
pro["lon"][good],
pro["lat"][good],
depth.compressed(),
data, self.dt, title)
class mooring(obsgen):
"""
Class to process generic moorings into ROMS observation files. This
handles temp, salt, u, and v.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_limits=None,
salt_limits=None, u_limits=None, v_limits=None,
depth_limit=0, temp_error=0.25, salt_error=0.08,
u_error=0.08, v_error=0.08, lat=None, lon=None,
provenance=None):
if temp_limits is None:
self.temp_limits = (5, 35)
else:
self.temp_limits = temp_limits
if salt_limits is None:
self.salt_limits = (31, 35.5)
else:
self.salt_limits = salt_limits
if u_limits is None:
self.u_limits = (-3, 3)
else:
self.u_limits = u_limits
if v_limits is None:
self.v_limits = (-3, 3)
else:
self.v_limits = v_limits
if provenance is None:
self.provenance = seapy.roms.obs.asprovenance("MOORING")
else:
self.provenance = provenance.upper()
self.depth_limit = depth_limit
self.temp_error = temp_error
self.salt_error = salt_error
self.u_error = u_error
self.v_error = v_error
self.lat = np.atleast_1d(lat)
self.lon = np.atleast_1d(lon)
super().__init__(grid, dt, reftime)
def convert_data(self, time, depth, data, error=None, title="Mooring Obs"):
"""
Given a set of data, process into an observation structure
Parameters
----------
time : ndarray
time of observations
depth : ndarray
depth of observations. depth is in rows, time in columns.
If depth does not change with time, it will be replicated in time.
data : dict
data to put into observations. A dictionary using seapy.roms.fields
as keys.
error : dict, optional
error of the observations (same keys and sizes as data)
title : string, optional
title for obs
Returns
-------
obs: seapy.roms.obs.obs
"""
# Check that the lat/lon is in the grid
if self.grid.east():
self.lon[self.lon <= 0] += 360
else:
self.lon[self.lon >= 180] -= 360
if not np.logical_and.reduce((
self.lon >= np.min(self.grid.lon_rho),
self.lon <= np.max(self.grid.lon_rho),
self.lat >= np.min(self.grid.lat_rho),
self.lat <= np.max(self.grid.lat_rho))):
warn("Mooring location is not in grid")
return
depth = np.atleast_1d(depth)
if not error:
error = {}
        if not data:
            warn("No data was provided")
            return
# Process the data
obsdata = []
for field in data:
limit = getattr(self, field + '_limits')
vals = np.ma.masked_outside(data[field], limit[0], limit[1],
copy=False)
            # error is a dict, so look the field up with .get (getattr on a
            # dict always returned the default).
            obsdata.append(seapy.roms.obs.raw_data(field, self.provenance,
                                                   vals, error.get(field, None),
                                                   getattr(self, field + '_error')))
ndep = depth.size
nt = len(time)
lat = np.resize(self.lat, (nt, ndep))
lon = np.resize(self.lon, (nt, ndep))
depth = np.resize(depth, (nt, ndep))
time = np.resize(time, (nt, ndep))
return seapy.roms.obs.gridder(self.grid, time, lon, lat, depth,
obsdata, self.dt, title)
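    # Minimal usage sketch (hypothetical positions and values; 'temp' is a
    # key from seapy.roms.fields, and depth is in rows, time in columns):
    #   gen = mooring(grid, dt=1.0 / 24, lat=21.3, lon=-158.1)
    #   obs = gen.convert_data(time=[100.0, 100.5], depth=[-10, -50],
    #                          data={'temp': [[24.1, 22.7], [24.0, 22.5]]})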
class tao_mooring(mooring):
"""
class to process TAO files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_limits=None,
salt_limits=None, u_limits=None, v_limits=None,
depth_limit=0, temp_error=0.25, salt_error=0.08,
u_error=0.08, v_error=0.08):
        super().__init__(grid, dt, reftime=reftime, temp_limits=temp_limits,
                         salt_limits=salt_limits, u_limits=u_limits,
                         v_limits=v_limits, depth_limit=depth_limit,
                         temp_error=temp_error, salt_error=salt_error,
                         u_error=u_error, v_error=v_error)
def convert_file(self, file, title="TAO Obs"):
"""
Load a TAO netcdf file and convert into an obs structure
"""
vals = {"temp": ["T_20", "QT_5020"],
"salt": ["S_41", "QS_5041"],
"u": ["U_320", "QS_5300"],
"v": ["V_321", "QS_5300"]}
nc = seapy.netcdf(file)
lat = nc.variables["lat"][:]
lon = nc.variables["lon"][:]
if not self.grid.east():
lon[lon > 180] -= 360
lat, lon = np.meshgrid(lat, lon)
time = netCDF4.num2date(nc.variables["time"][:],
nc.variables["time"].units) - self.epoch
time = list(map(lambda x: x.total_seconds() * seapy.secs2day, time))
depth = -nc.variables["depth"][:]
profile_list = np.where(np.logical_and.reduce((
lon >= np.min(self.grid.lon_rho),
lon <= np.max(self.grid.lon_rho),
lat >= np.min(self.grid.lat_rho),
lat <= np.max(self.grid.lat_rho))))
# If nothing is in the area, return nothing
if not profile_list[0].size:
return None
# Process each of the variables that are present
obsdata = []
for field in vals:
limit = getattr(self, field + '_limits')
if vals[field][0] in nc.variables:
data = nc.variables[vals[field][0]][:]
data = np.ma.masked_outside(
data[profile_list[0], profile_list[1], :, :],
limit[0], limit[1], copy=False)
qc = nc.variables[vals[field][1]][:]
qc = qc[profile_list[0], profile_list[1], :, :]
bad = np.where(np.logical_and(qc != 1, qc != 2))
data[bad] = np.ma.masked
obsdata.append(seapy.roms.obs.raw_data(field, "TAO_ARRAY",
data.compressed(), None,
getattr(self, field + '_error')))
nc.close()
# Build the time, lon, lat, and depth arrays of appropriate size
npts = profile_list[0].size
ndep = depth.size
nt = len(time)
lat = np.resize(lat[profile_list], (nt, ndep, npts))
lat = np.squeeze(np.transpose(lat, (2, 1, 0)))[~data.mask]
lon = np.resize(lon[profile_list], (nt, ndep, npts))
lon = np.squeeze(np.transpose(lon, (2, 1, 0)))[~data.mask]
depth = np.resize(depth, (npts, nt, ndep))
depth = np.squeeze(np.transpose(depth, (0, 2, 1)))[~data.mask]
time = np.squeeze(np.resize(time, (npts, ndep, nt)))[~data.mask]
return seapy.roms.obs.gridder(self.grid, time, lon, lat, depth,
obsdata, self.dt, title)
class argo_ctd(obsgen):
"""
class to process ARGO CTD netcdf files into ROMS observation
    files. This is a subclass of seapy.roms.obsgen.obsgen, and handles
the loading of the data.
"""
def __init__(self, grid, dt, reftime=seapy.default_epoch, temp_limits=None,
salt_limits=None, temp_error=0.25,
salt_error=0.1):
if temp_limits is None:
self.temp_limits = (2, 35)
else:
self.temp_limits = temp_limits
if salt_limits is None:
self.salt_limits = (10, 35.5)
else:
self.salt_limits = salt_limits
self.temp_error = temp_error
self.salt_error = salt_error
super().__init__(grid, dt, reftime)
def convert_file(self, file, title="Argo Obs"):
"""
Load an Argo file and convert into an obs structure
"""
nc = seapy.netcdf(file,aggdim="N_PROF")
# Load the position of all profiles in the file
lon = nc.variables["LONGITUDE"][:]
lat = nc.variables["LATITUDE"][:]
pro_q = nc.variables["POSITION_QC"][:].astype(int)
# Find the profiles that are in our area with known locations quality
if self.grid.east():
lon[lon < 0] += 360
profile_list = np.where(np.logical_and.reduce((
lat >= np.min(self.grid.lat_rho),
lat <= np.max(self.grid.lat_rho),
lon >= np.min(self.grid.lon_rho),
lon <= np.max(self.grid.lon_rho),
pro_q == 1)))[0]
# Check which are good profiles
profile_qc = nc.variables["PROFILE_PRES_QC"][
profile_list].astype('<U1')
profile_list = profile_list[profile_qc == 'A']
if not profile_list.size:
return None
# Load only the data from those in our area
julian_day = nc.variables["JULD_LOCATION"][profile_list]
argo_epoch = datetime.datetime.strptime(''.join(
nc.variables["REFERENCE_DATE_TIME"][:].astype('<U1')), '%Y%m%d%H%M%S')
time_delta = (self.epoch - argo_epoch).days
file_stamp = datetime.datetime.strptime(''.join(
nc.variables["DATE_CREATION"][:].astype('<U1')), '%Y%m%d%H%M%S')
# Grab data over the previous day
file_time = np.minimum((file_stamp - argo_epoch).days,
int(np.max(julian_day)))
time_list = np.where(julian_day >= file_time - 1)[0]
julian_day = julian_day[time_list]
lon = lon[profile_list[time_list]]
lat = lat[profile_list[time_list]]
profile_list = profile_list[time_list]
# Load the data in our region and time
temp = nc.variables["TEMP"][profile_list, :]
temp_qc = nc.variables["TEMP_QC"][profile_list, :]
salt = nc.variables["PSAL"][profile_list, :]
salt_qc = nc.variables["PSAL_QC"][profile_list, :]
pres = nc.variables["PRES"][profile_list, :]
pres_qc = nc.variables["PRES_QC"][profile_list, :]
nc.close()
# Ensure consistency
full_mask = np.logical_or.reduce((temp.mask, salt.mask, pres.mask))
temp[full_mask] = np.ma.masked
temp_qc[full_mask] = np.ma.masked
salt[full_mask] = np.ma.masked
salt_qc[full_mask] = np.ma.masked
pres[full_mask] = np.ma.masked
pres_qc[full_mask] = np.ma.masked
# Combine the QC codes
qc = np.mean(np.vstack((temp_qc.compressed(), salt_qc.compressed(),
pres_qc.compressed())).astype(int), axis=0)
good_data = np.where(qc == 1)
# Put everything together into individual observations
time = np.resize(julian_day - time_delta,
pres.shape[::-1]).T[~temp.mask][good_data]
lat = np.resize(lat, pres.shape[::-1]).T[~temp.mask][good_data]
lon = np.resize(lon, pres.shape[::-1]).T[~temp.mask][good_data]
depth = -seapy.seawater.depth(pres.compressed()[good_data], lat)
# Apply the limits
temp = np.ma.masked_outside(temp.compressed()[good_data],
self.temp_limits[0], self.temp_limits[1])
salt = np.ma.masked_outside(salt.compressed()[good_data],
self.salt_limits[0], self.salt_limits[1])
data = [seapy.roms.obs.raw_data("TEMP", "CTD_ARGO", temp,
None, self.temp_error),
seapy.roms.obs.raw_data("SALT", "CTD_ARGO", salt,
None, self.salt_error)]
return seapy.roms.obs.gridder(self.grid, time, lon, lat, depth,
data, self.dt, title)
|
dalepartridge/seapy
|
roms/obsgen.py
|
Python
|
mit
| 46,813
|
[
"Brian",
"NetCDF"
] |
4271138112dc40ef5c094a0d859b72be24482556b1daa352f3543b7a056002a7
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A runner that allows running of Beam pipelines interactively.
This module is experimental. No backwards-compatibility guarantees.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import apache_beam as beam
from apache_beam import runners
from apache_beam.runners.direct import direct_runner
from apache_beam.runners.interactive import cache_manager as cache
from apache_beam.runners.interactive import pipeline_analyzer
from apache_beam.runners.interactive.display import display_manager
from apache_beam.runners.interactive.display import pipeline_graph_renderer
# size of PCollection samples cached.
SAMPLE_SIZE = 8
class InteractiveRunner(runners.PipelineRunner):
"""An interactive runner for Beam Python pipelines.
Allows interactively building and running Beam Python pipelines.
"""
def __init__(self, underlying_runner=None, cache_dir=None,
render_option=None):
"""Constructor of InteractiveRunner.
Args:
underlying_runner: (runner.PipelineRunner)
cache_dir: (str) the directory where PCollection caches are kept
render_option: (str) this parameter decides how the pipeline graph is
rendered. See display.pipeline_graph_renderer for available options.
"""
self._underlying_runner = (underlying_runner
or direct_runner.DirectRunner())
self._cache_manager = cache.FileBasedCacheManager(cache_dir)
self._renderer = pipeline_graph_renderer.get_renderer(render_option)
self._in_session = False
def set_render_option(self, render_option):
"""Sets the rendering option.
Args:
render_option: (str) this parameter decides how the pipeline graph is
rendered. See display.pipeline_graph_renderer for available options.
"""
self._renderer = pipeline_graph_renderer.get_renderer(render_option)
def start_session(self):
"""Start the session that keeps back-end managers and workers alive.
"""
if self._in_session:
return
enter = getattr(self._underlying_runner, '__enter__', None)
if enter is not None:
logging.info('Starting session.')
self._in_session = True
enter()
else:
logging.error('Keep alive not supported.')
def end_session(self):
"""End the session that keeps backend managers and workers alive.
"""
if not self._in_session:
return
exit = getattr(self._underlying_runner, '__exit__', None)
if exit is not None:
self._in_session = False
logging.info('Ending session.')
exit(None, None, None)
def cleanup(self):
self._cache_manager.cleanup()
def apply(self, transform, pvalueish):
# TODO(qinyeli, BEAM-646): Remove runner interception of apply.
return self._underlying_runner.apply(transform, pvalueish)
def run_pipeline(self, pipeline):
if not hasattr(self, '_desired_cache_labels'):
self._desired_cache_labels = set()
# Invoke a round trip through the runner API. This makes sure the Pipeline
# proto is stable.
pipeline = beam.pipeline.Pipeline.from_runner_api(
pipeline.to_runner_api(use_fake_coders=True),
pipeline.runner,
pipeline._options)
# Snapshot the pipeline in a portable proto before mutating it.
pipeline_proto, original_context = pipeline.to_runner_api(
return_context=True, use_fake_coders=True)
pcolls_to_pcoll_id = self._pcolls_to_pcoll_id(pipeline, original_context)
analyzer = pipeline_analyzer.PipelineAnalyzer(self._cache_manager,
pipeline_proto,
self._underlying_runner,
pipeline._options,
self._desired_cache_labels)
# Should be only accessed for debugging purpose.
self._analyzer = analyzer
pipeline_to_execute = beam.pipeline.Pipeline.from_runner_api(
analyzer.pipeline_proto_to_execute(),
self._underlying_runner,
pipeline._options)
display = display_manager.DisplayManager(
pipeline_proto=pipeline_proto,
pipeline_analyzer=analyzer,
cache_manager=self._cache_manager,
pipeline_graph_renderer=self._renderer)
display.start_periodic_update()
result = pipeline_to_execute.run()
result.wait_until_finish()
display.stop_periodic_update()
return PipelineResult(result, self, self._analyzer.pipeline_info(),
self._cache_manager, pcolls_to_pcoll_id)
def _pcolls_to_pcoll_id(self, pipeline, original_context):
"""Returns a dict mapping PCollections string to PCollection IDs.
Using a PipelineVisitor to iterate over every node in the pipeline,
records the mapping from PCollections to PCollections IDs. This mapping
will be used to query cached PCollections.
Args:
pipeline: (pipeline.Pipeline)
original_context: (pipeline_context.PipelineContext)
Returns:
(dict from str to str) a dict mapping str(pcoll) to pcoll_id.
"""
pcolls_to_pcoll_id = {}
from apache_beam.pipeline import PipelineVisitor # pylint: disable=import-error
class PCollVisitor(PipelineVisitor): # pylint: disable=used-before-assignment
""""A visitor that records input and output values to be replaced.
Input and output values that should be updated are recorded in maps
input_replacements and output_replacements respectively.
We cannot update input and output values while visiting since that
results in validation errors.
"""
def enter_composite_transform(self, transform_node):
self.visit_transform(transform_node)
def visit_transform(self, transform_node):
for pcoll in transform_node.outputs.values():
pcolls_to_pcoll_id[str(pcoll)] = original_context.pcollections.get_id(
pcoll)
pipeline.visit(PCollVisitor())
return pcolls_to_pcoll_id
class PipelineResult(beam.runners.runner.PipelineResult):
"""Provides access to information about a pipeline."""
def __init__(self, underlying_result, runner, pipeline_info, cache_manager,
pcolls_to_pcoll_id):
super(PipelineResult, self).__init__(underlying_result.state)
self._runner = runner
self._pipeline_info = pipeline_info
self._cache_manager = cache_manager
self._pcolls_to_pcoll_id = pcolls_to_pcoll_id
def _cache_label(self, pcoll):
pcoll_id = self._pcolls_to_pcoll_id[str(pcoll)]
return self._pipeline_info.cache_label(pcoll_id)
def wait_until_finish(self):
# PipelineResult is not constructed until pipeline execution is finished.
return
def get(self, pcoll):
cache_label = self._cache_label(pcoll)
if self._cache_manager.exists('full', cache_label):
pcoll_list, _ = self._cache_manager.read('full', cache_label)
return pcoll_list
else:
self._runner._desired_cache_labels.add(cache_label) # pylint: disable=protected-access
raise ValueError('PCollection not available, please run the pipeline.')
def sample(self, pcoll):
cache_label = self._cache_label(pcoll)
if self._cache_manager.exists('sample', cache_label):
return self._cache_manager.read('sample', cache_label)
else:
self._runner._desired_cache_labels.add(cache_label) # pylint: disable=protected-access
raise ValueError('PCollection not available, please run the pipeline.')
|
rangadi/beam
|
sdks/python/apache_beam/runners/interactive/interactive_runner.py
|
Python
|
apache-2.0
| 8,330
|
[
"VisIt"
] |
f83f5105cf0e34da2c2a1cb39693628e8e2e2837d53caf4d01974e9231af5a15
|
"""
Statistical Cluster Model Similarity class. A Gaussian representation
of a song. The distance between two models is computed with the
symmetrized Kullback Leibler Divergence.
"""
import numpy
from numpy import log
from decoder import AudioDecoder, init_gstreamer
from filter import Filter
import scipy.sparse
class SCMS(object):
def __init__(self, mean, cov):
self.cov = cov
self.mean = mean
self.dim = self.mean.size
def __sub__(self, b):
"""
Kullback Leibler Divergence.
"""
meandiff = numpy.matrix(b.mean - self.mean)
#print ' mean-diff:', meandiff
#print ' dets', numpy.linalg.det(self.cov), numpy.linalg.det(b.cov)
#print 'covariance-product:', b.invcov * self.cov
#second = b.invcov * self.cov
second = numpy.linalg.solve(b.cov, self.cov)
#first = meandiff.T * b.invcov * meandiff
#print b.cov.shape, meandiff.shape
#print 'linalg.solve:', b.cov.shape, meandiff.shape
first = numpy.linalg.solve(b.cov, meandiff)
#print ' second', second
#print ' first', first
kl = 0.5 * (numpy.trace(second) + \
meandiff.T * first - \
self.dim - log(numpy.linalg.det(self.cov) / numpy.linalg.det(b.cov)))
#print ' X', numpy.trace(second), meandiff.T * first
#print ' X', self.dim, - log(numpy.linalg.det(self.cov) / numpy.linalg.det(b.cov))
# some tolerance for rounding problems
if kl < 0 and kl > -0.1 * self.dim:
kl = 0
assert kl >= 0, kl
return kl
def distance(self, b):
"""
symmetrized Kullback Leibler Divergence
"""
assert b.mean.shape == self.mean.shape, (b.mean.shape, self.mean.shape)
assert b.cov.shape == self.cov.shape, (b.cov.shape, self.cov.shape)
#print b.mean.shape, b.cov.shape, self.mean.shape, self.cov.shape
meandiff = numpy.matrix(b.mean - self.mean)
second = numpy.linalg.solve(b.cov, self.cov)
first = numpy.linalg.solve(b.cov, meandiff)
kl = (numpy.trace(second) + meandiff.T * first - self.dim) / 2.
meandiff = numpy.matrix(self.mean - b.mean)
second = numpy.linalg.solve(self.cov, b.cov)
first = numpy.linalg.solve(self.cov, meandiff)
kl += (numpy.trace(second) + meandiff.T * first - self.dim) / 2.
if kl < 0 and kl > -0.1 * self.dim:
kl = 0
assert kl >= 0, kl
return kl
def __repr__(self):
return """SCMS(dim=%d, means=%s, cov=%s)""" % (self.dim, self.mean, repr(self.cov))
def scms_from_mfcc(mfcc):
dim = len(mfcc)
#print 'DIM:', self.dim, mfcc.shape
mean = mfcc.mean(axis=1)
#cov = numpy.cov(mfcc)
stdev = numpy.asarray(mfcc).std(axis=1)
cov = numpy.diag(stdev)
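    # Note: the diagonal holds standard deviations rather than variances;
    # the commented-out numpy.cov call above would yield variances (plus
    # off-diagonal covariances) instead.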
#cov = scipy.sparse.dia_matrix((stdev, [0]), shape=(self.dim, self.dim)).todense()
assert mean.shape == (dim, 1), mean.shape
assert cov.shape == (dim, dim), cov.shape
res = SCMS(mean, cov)
assert res.mean.shape == (dim, 1), res.mean.shape
assert res.cov.shape == (dim, dim), res.cov.shape
return res
#def symmetric_distance(a, b):
# return ((a - b) + (b - a)) / 2.
if __name__ == '__main__':
numpy.random.seed(0)
adata = numpy.random.multivariate_normal([0., 0], [[1., 0], [0, 1]], size=1000)
#print 'adata:', adata
a = scms_from_mfcc(adata.T)
#a.mean[:] = [0, 0]
#a.cov[:,:] = [[1., 0], [0, 1]]
print 'A:', a.mean, a.cov
print a - a
bdata = numpy.random.multivariate_normal([1., 1.], [[1., 0.1], [0.1, 1]], size=1000)
b = scms_from_mfcc(bdata.T)
#b.mean[:] = [1., 1.]
#b.cov[:,:] = [[2., 0.1], [0.1, 2]]
print 'B:', b.mean, b.cov
print b - a
|
JohannesBuchner/PyMirage
|
pymirage/analyse.py
|
Python
|
gpl-2.0
| 3,397
|
[
"Gaussian"
] |
1624e8112da6fa63498ab5255da6038ccdb72a9710708654d5525e7b19e9ecd5
|
import argparse, os
from paraview import simple
from paraview import data_exploration as wx
try:
simple.LoadDistributedPlugin('RGBZView', ns=globals())
except Exception:
print 'Unable to load RGBZView plugin'
def generateData(datasetPath, outputDir) :
if not os.path.exists(outputDir):
os.makedirs(outputDir)
resolution = 500
center_of_rotation = [0.0, 0.0, 0.0]
rotation_axis = [0.0, 0.0, 1.0]
distance = 45.0
disk_out_refex2 = simple.ExodusIIReader(FileName=[datasetPath])
disk_out_refex2.PointVariables = ['Temp', 'V', 'Pres', 'AsH3', 'GaMe3', 'CH4', 'H2']
disk_out_refex2.NodeSetArrayStatus = []
disk_out_refex2.SideSetArrayStatus = []
disk_out_refex2.ElementBlocks = ['Unnamed block ID: 1 Type: HEX8']
filters = []
filters_description = []
calculator1 = simple.Calculator(Input=disk_out_refex2)
calculator1.ResultArrayName = 'Velocity'
calculator1.Function = 'mag(V)'
simple.UpdatePipeline()
color_by = []
#
# COMPLAINT
#
# As a user of this system, I'd like not to have to specify that I need
# 'nX', 'nY', and 'nZ' when I add a colorby of type "VALUE". Instead,
# I'd like it to figure out that I'm going to need normals for that kind
# of rendering and add them for me.
#
color_type = [
('VALUE', "Velocity"),
('VALUE', "Pres"),
('VALUE', "Temp"),
('VALUE', "nX"),
('VALUE', "nY"),
('VALUE', "nZ")
]
pdi = calculator1.GetPointDataInformation()
#
# COMPLAINT
#
# Ditto the above complaint here.
#
luts = {
"Velocity": ["point", "Velocity", 0, pdi.GetArray("Velocity").GetRange()],
"Pres": ["point", "Pres", 0, pdi.GetArray("Pres").GetRange()],
"Temp": ["point", "Temp", 0, pdi.GetArray("Temp").GetRange()],
"nX": ["point", "Normals", 0, (-1,1)],
"nY": ["point", "Normals", 1, (-1,1)],
"nZ": ["point", "Normals", 2, (-1,1)]
}
contour_values = [ 300.0, 600.0, 900.0 ]
for iso_value in contour_values:
contour = simple.Contour(
Input=calculator1,
PointMergeMethod="Uniform Binning",
ContourBy = ['POINTS', 'Temp'],
Isosurfaces = [iso_value],
ComputeScalars = 1)
# Add this isocontour to my list of filters
filters.append( contour )
color_by.append( color_type )
filters_description.append( {'name': 'iso=%s' % str(iso_value), 'parent': "Contour by temperature"} )
# create a new 'Stream Tracer'
    # use the simple. prefix, since only "from paraview import simple" is imported
    streamTracer1 = simple.StreamTracer(Input=calculator1,
        SeedType='High Resolution Line Source')
streamTracer1.Vectors = ['POINTS', 'V']
streamTracer1.MaximumStreamlineLength = 20.15999984741211
# init the 'High Resolution Line Source' selected for 'SeedType'
streamTracer1.SeedType.Point1 = [-5.75, -5.75, -10.0]
streamTracer1.SeedType.Point2 = [5.75, 5.75, 10.15999984741211]
# create a new 'Tube'
    tube1 = simple.Tube(Input=streamTracer1)
tube1.Scalars = ['POINTS', 'Velocity']
tube1.Vectors = ['POINTS', 'Normals']
tube1.Radius = 0.10474160957336426
#
# COMPLAINT
#
# Here, because the "Normals" field of the tube filter is all funky
# (directions seem to change at the seed points, when integration
# proceeded in both directions), I actually needed to play around
# with ParaView until I found a filter that would get me nice
# looking normals. Then, that filter didn't have a "Normals" field,
# so I had to use a calculator to create it. Not super nice from a
# users perspective.
#
    surfaceVectors1 = simple.SurfaceVectors(Input=tube1)
surfaceVectors1.SelectInputVectors = ['POINTS', 'TubeNormals']
calculator2 = simple.Calculator(Input=surfaceVectors1)
calculator2.ResultArrayName = 'Normals'
calculator2.Function = 'TubeNormals'
# Now add the stream tubes to the filters list
    filters.append(calculator2)
    color_by.append(color_type)
    filters_description.append({'name': 'Stream Tubes'})
# create a new 'Clip'
    clip1 = simple.Clip(Input=calculator1)
clip1.ClipType = 'Plane'
clip1.Value = 11.209410083552676
clip1.InsideOut = 1
# init the 'Plane' selected for 'ClipType'
clip1.ClipType.Origin = [0.0, 0.0, 0.07999992370605469]
clip1.ClipType.Normal = [0.7, 0.0, -0.4]
#
# COMPLAINT
#
# Here again, the output of the clip filter doesn't have a "Normals"
# field on points, so I have to do some funky stuff to get what I
# need. It would be nice if this could be figured out for me
# somehow.
#
    extractSurface1 = simple.ExtractSurface(Input=clip1)
    generateSurfaceNormals1 = simple.GenerateSurfaceNormals(Input=extractSurface1)
# Now add the first clip to the filters list
    filters.append(generateSurfaceNormals1)
    color_by.append(color_type)
    filters_description.append({'name': 'Clip One'})
# create a new 'Clip'
    clip2 = simple.Clip(Input=calculator1)
clip2.ClipType = 'Plane'
clip2.Value = 11.209410083552676
clip2.InsideOut = 0
# init the 'Plane' selected for 'ClipType'
clip2.ClipType.Origin = [0.0, 0.0, 0.07999992370605469]
clip2.ClipType.Normal = [0.7, 0.0, -0.4]
#
# COMPLAINT
#
# Ditto the above complaint here.
#
    extractSurface2 = simple.ExtractSurface(Input=clip2)
    generateSurfaceNormals2 = simple.GenerateSurfaceNormals(Input=extractSurface2)
# Now add the second clip to the filters list
    filters.append(generateSurfaceNormals2)
    color_by.append(color_type)
    filters_description.append({'name': 'Clip Two'})
title = "Composite Dynamic Rendering - Disk Out Ref"
description = "A sample dataset for dynamic rendering"
analysis = wx.AnalysisManager(outputDir, title, description)
id = 'composite'
title = '3D composite'
description = "contour set"
analysis.register_analysis(id, title, description, '{theta}/{phi}/{filename}', wx.CompositeImageExporter.get_data_type()+"-light")
fng = analysis.get_file_name_generator(id)
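    # 12 theta angles (0..330 in 30 degree steps) x 3 phi angles (-60, 0, 60)
    # = 36 camera positions around the center of rotation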
camera_handler = wx.ThreeSixtyCameraHandler(
fng,
None,
[ float(r) for r in range(0, 360, 30) ],
[ float(r) for r in range(-60, 61, 60) ],
center_of_rotation,
rotation_axis,
distance)
exporter = wx.CompositeImageExporter(
fng,
filters,
color_by,
luts,
camera_handler,
[resolution,resolution],
filters_description,
0, 0, 'png')
exporter.set_analysis(analysis)
analysis.begin()
exporter.UpdatePipeline(0)
analysis.end()
if __name__ == "__main__":
description = "Python script to generate a Cinema dataset from disk_out_ref.ex2"
parser = argparse.ArgumentParser(description=description)
parser.add_argument("-i", "--inputfile", default=None,
help="Fully qualified path to disk_out_ref.ex2 data file")
parser.add_argument("-o", "--outputdirectory", default=os.getcwd(),
help="Fully qualified path to output directory")
args = parser.parse_args()
generateData(args.inputfile, args.outputdirectory)
|
Kitware/cinema
|
scripts/data_generation/generateDiskOutRefData.py
|
Python
|
bsd-3-clause
| 7,202
|
[
"ParaView"
] |
88bd43c3c8b35b6031bfefe61186d65543a60e847bf6074be7ece98c3f3ee176
|
from asyncio import gather, Lock, Semaphore, sleep, CancelledError
from collections import deque
from time import time, monotonic
from queue import Empty
from itertools import cycle
from sys import exit
from distutils.version import StrictVersion
from aiopogo import PGoApi, HashServer, json_loads, exceptions as ex
from aiopogo.auth_ptc import AuthPtc
from cyrandom import choice, randint, uniform
from pogeo import get_distance
from .db import FORT_CACHE, RAID_CACHE, MYSTERY_CACHE, SIGHTING_CACHE
from .utils import round_coords, load_pickle, get_device_info, get_start_coords, Units, randomize_point, calc_pokemon_level
from .shared import get_logger, LOOP, SessionManager, run_threaded, ACCOUNTS
from . import altitudes, avatar, bounds, db_proc, spawns, sanitized as conf
from python_anticaptcha import AnticaptchaClient, NoCaptchaTaskProxylessTask
from python_anticaptcha.exceptions import AnticatpchaException
if conf.NOTIFY:
from .notification import Notifier
if conf.CACHE_CELLS:
from array import typecodes
if 'Q' in typecodes:
from pogeo import get_cell_ids_compact as _pogeo_cell_ids
else:
from pogeo import get_cell_ids as _pogeo_cell_ids
else:
from pogeo import get_cell_ids as _pogeo_cell_ids
_unit = getattr(Units, conf.SPEED_UNIT.lower())
if conf.SPIN_POKESTOPS:
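    # The caps below are roughly the same physical speed expressed in each
    # configured unit: ~21 mph ~ 34 km/h ~ 34000 m/h.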
if _unit is Units.miles:
SPINNING_SPEED_LIMIT = 21
UNIT_STRING = "MPH"
elif _unit is Units.kilometers:
SPINNING_SPEED_LIMIT = 34
UNIT_STRING = "KMH"
elif _unit is Units.meters:
SPINNING_SPEED_LIMIT = 34000
UNIT_STRING = "m/h"
UNIT = _unit.value
del _unit
class Worker:
"""Single worker walking on the map"""
download_hash = ''
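    # enforce a minimum scan delay of 10 seconds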
scan_delay = conf.SCAN_DELAY if conf.SCAN_DELAY >= 10 else 10
g = {'seen': 0, 'captchas': 0}
if conf.CACHE_CELLS:
cells = load_pickle('cells') or {}
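        # Cell IDs are cached per location rounded to 4 decimal places
        # (roughly an 11 m grid), since nearby points share the same cells.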
@classmethod
def get_cell_ids(cls, point):
rounded = round_coords(point, 4)
try:
return cls.cells[rounded]
except KeyError:
cells = _pogeo_cell_ids(rounded)
cls.cells[rounded] = cells
return cells
else:
get_cell_ids = _pogeo_cell_ids
login_semaphore = Semaphore(conf.SIMULTANEOUS_LOGINS, loop=LOOP)
sim_semaphore = Semaphore(conf.SIMULTANEOUS_SIMULATION, loop=LOOP)
multiproxy = False
if conf.PROXIES:
if len(conf.PROXIES) > 1:
multiproxy = True
blacklistedProxies = set()
proxies = cycle(conf.PROXIES)
else:
proxies = None
if conf.NOTIFY:
notifier = Notifier()
def __init__(self, worker_no):
self.worker_no = worker_no
self.log = get_logger('worker-{}'.format(worker_no))
# account information
try:
self.account = self.extra_queue.get_nowait()
except Empty as e:
try:
self.account = self.captcha_queue.get_nowait()
except Empty as e:
raise ValueError("You don't have enough accounts for the number of workers specified in GRID.") from e
self.username = self.account['username']
try:
self.location = self.account['location'][:2]
except KeyError:
self.location = get_start_coords(worker_no)
self.altitude = None
# last time of any request
self.last_request = self.account.get('time', 0)
# last time of a request that requires user interaction in the game
self.last_action = self.last_request
# last time of a GetMapObjects request
self.last_gmo = self.last_request
        try:
            self.items = self.account['items']
        except KeyError:
            self.account['items'] = {}
            self.items = self.account['items']
        # bag_items must be set on both paths, otherwise the clean_bag checks
        # would hit an unset attribute for fresh accounts
        self.bag_items = sum(self.items.values())
        self.inventory_timestamp = self.account.get('inventory_timestamp', 0) if self.items else 0
self.player_level = self.account.get('level')
self.num_captchas = 0
self.eggs = {}
self.unused_incubators = deque()
self.initialize_api()
# State variables
self.busy = Lock(loop=LOOP)
# Other variables
self.after_spawn = 0
self.speed = 0
self.total_seen = 0
self.error_code = 'INIT'
self.item_capacity = 350
self.visits = 0
self.pokestops = conf.SPIN_POKESTOPS
self.next_spin = 0
self.handle = HandleStub()
def initialize_api(self):
device_info = get_device_info(self.account)
self.empty_visits = 0
self.api = PGoApi(device_info=device_info)
self.api.set_position(*self.location, self.altitude)
if self.proxies:
self.api.proxy = next(self.proxies)
try:
if self.account['provider'] == 'ptc' and 'auth' in self.account:
self.api.auth_provider = AuthPtc(username=self.username, password=self.account['password'], timeout=conf.LOGIN_TIMEOUT)
self.api.auth_provider._access_token = self.account['auth']
self.api.auth_provider._access_token_expiry = self.account['expiry']
if self.api.auth_provider.check_access_token():
self.api.auth_provider.authenticated = True
except KeyError:
pass
def swap_proxy(self):
proxy = self.api.proxy
if proxy not in self.blacklistedProxies:
self.log.warning("Removing {} because of IP BAN error",proxy)
self.blacklistedProxies.add(proxy)
if len(self.blacklistedProxies) == len(conf.PROXIES):
self.log.warning("Resetting blacklistedProxies because there is no more proxy available")
self.blacklistedProxies.clear()
while proxy == self.api.proxy or self.api.proxy in self.blacklistedProxies:
self.api.proxy = next(self.proxies)
async def login(self, reauth=False):
"""Logs worker in and prepares for scanning"""
self.log.info('Trying to log in')
for attempt in range(-1, conf.MAX_RETRIES):
try:
self.error_code = '_'
async with self.login_semaphore:
self.error_code = 'LOGIN'
await self.api.set_authentication(
username=self.username,
password=self.account['password'],
provider=self.account.get('provider') or 'ptc',
timeout=conf.LOGIN_TIMEOUT
)
except ex.UnexpectedAuthError as e:
await self.swap_account('unexpected auth error')
except ex.AuthException as e:
err = e
await sleep(2, loop=LOOP)
else:
err = None
break
if reauth:
if err:
self.error_code = 'NOT AUTHENTICATED'
self.log.info('Re-auth error on {}: {}', self.username, err)
return False
self.error_code = None
return True
if err:
raise err
version = 7903
self.error_code = '-'
async with self.sim_semaphore:
self.error_code = 'APP SIMULATION'
if conf.APP_SIMULATION:
await self.app_simulation_login(version)
else:
await self.download_remote_config(version)
self.error_code = None
return True
async def get_player(self):
request = self.api.create_request()
request.get_player(player_locale=conf.PLAYER_LOCALE)
responses = await self.call(request, chain=False)
tutorial_state = None
try:
get_player = responses['GET_PLAYER']
if get_player.banned:
raise ex.BannedAccountException
player_data = get_player.player_data
tutorial_state = player_data.tutorial_state
# API can return 0 as capacity.
if player_data.max_item_storage != 0:
self.item_capacity = player_data.max_item_storage
if 'created' not in self.account:
self.account['created'] = player_data.creation_timestamp_ms / 1000
except (KeyError, TypeError, AttributeError):
pass
return tutorial_state
async def download_remote_config(self, version):
request = self.api.create_request()
request.download_remote_config_version(platform=1, app_version=version)
responses = await self.call(request, stamp=False, buddy=False, settings=True, inbox=False, dl_hash=False)
try:
inventory_items = responses['GET_INVENTORY'].inventory_delta.inventory_items
for item in inventory_items:
level = item.inventory_item_data.player_stats.level
if level:
self.player_level = level
break
except KeyError:
pass
await self.random_sleep(.78, 1.05)
try:
remote_config = responses['DOWNLOAD_REMOTE_CONFIG_VERSION']
return (
remote_config.asset_digest_timestamp_ms / 1000000,
remote_config.item_templates_timestamp_ms / 1000)
except KeyError:
return 0.0, 0.0
async def set_avatar(self, tutorial=False):
        player_avatar = avatar.new()
request = self.api.create_request()
request.list_avatar_customizations(
            avatar_type=player_avatar['avatar'],
slot=tuple(),
filters=(2,)
)
await self.call(request, buddy=not tutorial, action=5)
await self.random_sleep(7, 14)
request = self.api.create_request()
        request.set_avatar(player_avatar=player_avatar)
await self.call(request, buddy=not tutorial, action=2)
if tutorial:
await self.random_sleep(.5, 4)
request = self.api.create_request()
request.mark_tutorial_complete(tutorials_completed=(1,))
await self.call(request, buddy=False)
await self.random_sleep(.5, 1)
request = self.api.create_request()
request.get_player_profile()
await self.call(request, action=1)
async def app_simulation_login(self, version):
self.log.info('Starting RPC login sequence (iOS app simulation)')
# empty request
request = self.api.create_request()
await self.call(request, chain=False)
await self.random_sleep(.43, .97)
# request 1: get_player
tutorial_state = await self.get_player()
await self.random_sleep(.53, 1.1)
# request 2: download_remote_config_version
asset_time, template_time = await self.download_remote_config(version)
if asset_time > self.account.get('asset_time', 0.0):
# request 3: get_asset_digest
i = randint(0, 3)
result = 2
page_offset = 0
page_timestamp = 0
while result == 2:
request = self.api.create_request()
request.get_asset_digest(
platform=1,
app_version=version,
paginate=True,
page_offset=page_offset,
page_timestamp=page_timestamp)
responses = await self.call(request, buddy=False, settings=True)
if i > 2:
await sleep(1.45)
i = 0
else:
i += 1
await sleep(.2)
try:
response = responses['GET_ASSET_DIGEST']
except KeyError:
break
result = response.result
page_offset = response.page_offset
page_timestamp = response.timestamp_ms
self.account['asset_time'] = asset_time
if template_time > self.account.get('template_time', 0.0):
# request 4: download_item_templates
i = randint(0, 3)
result = 2
page_offset = 0
page_timestamp = 0
while result == 2:
request = self.api.create_request()
request.download_item_templates(
paginate=True,
page_offset=page_offset,
page_timestamp=page_timestamp)
responses = await self.call(request, buddy=False, settings=True)
if i > 2:
await sleep(1.5)
i = 0
else:
i += 1
await sleep(.25)
try:
response = responses['DOWNLOAD_ITEM_TEMPLATES']
except KeyError:
break
result = response.result
page_offset = response.page_offset
page_timestamp = response.timestamp_ms
self.account['template_time'] = template_time
if (conf.COMPLETE_TUTORIAL and
tutorial_state is not None and
not all(x in tutorial_state for x in (0, 1, 3, 4, 7))):
self.log.warning('{} is starting tutorial', self.username)
await self.complete_tutorial(tutorial_state)
else:
# request 5: get_player_profile
request = self.api.create_request()
request.get_player_profile()
await self.call(request, settings=True, inbox=False)
await self.random_sleep(.2, .3)
if self.player_level:
# request 6: level_up_rewards
request = self.api.create_request()
request.level_up_rewards(level=self.player_level)
await self.call(request, settings=True)
await self.random_sleep(.45, .7)
else:
self.log.warning('No player level')
self.log.info('Finished RPC login sequence (iOS app simulation)')
await self.random_sleep(.5, 1.3)
self.error_code = None
return True
async def complete_tutorial(self, tutorial_state):
self.error_code = 'TUTORIAL'
if 0 not in tutorial_state:
# legal screen
request = self.api.create_request()
request.mark_tutorial_complete(tutorials_completed=(0,))
await self.call(request, buddy=False)
await self.random_sleep(.35, .525)
request = self.api.create_request()
request.get_player(player_locale=conf.PLAYER_LOCALE)
await self.call(request, buddy=False)
await sleep(1)
if 1 not in tutorial_state:
# avatar selection
await self.set_avatar(tutorial=True)
starter_id = None
if 3 not in tutorial_state:
# encounter tutorial
await self.random_sleep(.7, .9)
request = self.api.create_request()
request.get_download_urls(asset_id=
('1a3c2816-65fa-4b97-90eb-0b301c064b7a/1487275569649000',
'aa8f7687-a022-4773-b900-3a8c170e9aea/1487275581132582',
'e89109b0-9a54-40fe-8431-12f7826c8194/1487275593635524'))
await self.call(request)
await self.random_sleep(7, 10.3)
request = self.api.create_request()
starter = choice((1, 4, 7))
request.encounter_tutorial_complete(pokemon_id=starter)
await self.call(request, action=1)
await self.random_sleep(.4, .5)
request = self.api.create_request()
request.get_player(player_locale=conf.PLAYER_LOCALE)
responses = await self.call(request)
try:
inventory = responses['GET_INVENTORY'].inventory_delta.inventory_items
for item in inventory:
pokemon = item.inventory_item_data.pokemon_data
if pokemon.id:
starter_id = pokemon.id
break
except (KeyError, TypeError):
starter_id = None
if 4 not in tutorial_state:
# name selection
await self.random_sleep(12, 18)
request = self.api.create_request()
request.claim_codename(codename=self.username)
await self.call(request, action=2)
await sleep(.7, loop=LOOP)
request = self.api.create_request()
request.get_player(player_locale=conf.PLAYER_LOCALE)
await self.call(request)
await sleep(.13, loop=LOOP)
request = self.api.create_request()
request.mark_tutorial_complete(tutorials_completed=(4,))
await self.call(request, buddy=False)
if 7 not in tutorial_state:
# first time experience
await self.random_sleep(3.9, 4.5)
request = self.api.create_request()
request.mark_tutorial_complete(tutorials_completed=(7,))
await self.call(request)
if starter_id:
await self.random_sleep(4, 5)
request = self.api.create_request()
request.set_buddy_pokemon(pokemon_id=starter_id)
await self.call(request, action=2)
await self.random_sleep(.8, 1.2)
await sleep(.2, loop=LOOP)
return True
def update_inventory(self, inventory_items):
for thing in inventory_items:
obj = thing.inventory_item_data
if obj.HasField('item'):
item = obj.item
self.items[item.item_id] = item.count
self.bag_items = sum(self.items.values())
elif conf.INCUBATE_EGGS:
if obj.HasField('pokemon_data') and obj.pokemon_data.is_egg:
egg = obj.pokemon_data
self.eggs[egg.id] = egg
elif obj.HasField('egg_incubators'):
self.unused_incubators.clear()
for item in obj.egg_incubators.egg_incubator:
if item.pokemon_id:
continue
if item.item_id == 901:
self.unused_incubators.append(item)
else:
self.unused_incubators.appendleft(item)
async def call(self, request, chain=True, stamp=True, buddy=True, settings=False, inbox=True, dl_hash=True, action=None):
if chain:
request.check_challenge()
request.get_hatched_eggs()
request.get_inventory(last_timestamp_ms=self.inventory_timestamp)
request.check_awarded_badges()
if settings:
if dl_hash:
request.download_settings(hash=self.download_hash)
else:
request.download_settings()
if buddy:
request.get_buddy_walked()
if inbox:
request.get_inbox(is_history=True)
if action:
now = time()
# wait for the time required, or at least a half-second
if self.last_action > now + .5:
await sleep(self.last_action - now, loop=LOOP)
else:
await sleep(0.5, loop=LOOP)
response = None
err = None
for attempt in range(-1, conf.MAX_RETRIES):
try:
responses = await request.call()
self.last_request = time()
err = None
break
except (ex.NotLoggedInException, ex.AuthException) as e:
self.log.info('Auth error on {}: {}', self.username, e)
err = e
await sleep(3, loop=LOOP)
if not await self.login(reauth=True):
await self.swap_account(reason='reauth failed')
except ex.TimeoutException as e:
self.error_code = 'TIMEOUT'
if not isinstance(e, type(err)):
err = e
self.log.warning('{}', e)
await sleep(10, loop=LOOP)
except ex.HashingOfflineException as e:
if not isinstance(e, type(err)):
err = e
self.log.warning('{}', e)
self.error_code = 'HASHING OFFLINE'
await sleep(5, loop=LOOP)
except ex.NianticOfflineException as e:
if not isinstance(e, type(err)):
err = e
self.log.warning('{}', e)
self.error_code = 'NIANTIC OFFLINE'
await self.random_sleep()
except ex.HashingQuotaExceededException as e:
if not isinstance(e, type(err)):
err = e
self.log.warning('Exceeded your hashing quota, sleeping.')
self.error_code = 'QUOTA EXCEEDED'
refresh = HashServer.status.get('period')
now = time()
if refresh:
if refresh > now:
await sleep(refresh - now + 1, loop=LOOP)
else:
await sleep(5, loop=LOOP)
else:
await sleep(30, loop=LOOP)
except ex.BadRPCException:
raise
except ex.InvalidRPCException as e:
self.last_request = time()
if not isinstance(e, type(err)):
err = e
self.log.warning('{}', e)
self.error_code = 'INVALID REQUEST'
await self.random_sleep()
except ex.ProxyException as e:
if not isinstance(e, type(err)):
err = e
self.error_code = 'PROXY ERROR'
if self.multiproxy:
self.log.error('{}, swapping proxy.', e)
self.swap_proxy()
else:
if not isinstance(e, type(err)):
self.log.error('{}', e)
await sleep(5, loop=LOOP)
            except (ex.MalformedResponseException, ex.UnexpectedResponseException) as e:
                self.last_request = time()
                if not isinstance(e, type(err)):
                    err = e
                    self.log.warning('{}', e)
                self.error_code = 'MALFORMED RESPONSE'
                await self.random_sleep()
if err is not None:
raise err
if action:
# pad for time that action would require
self.last_action = self.last_request + action
try:
delta = responses['GET_INVENTORY'].inventory_delta
self.inventory_timestamp = delta.new_timestamp_ms
self.update_inventory(delta.inventory_items)
except KeyError:
pass
if settings:
try:
dl_settings = responses['DOWNLOAD_SETTINGS']
Worker.download_hash = dl_settings.hash
except KeyError:
self.log.info('Missing DOWNLOAD_SETTINGS response.')
else:
if (not dl_hash
and conf.FORCED_KILL
and dl_settings.settings.minimum_client_version != '0.79.3'):
forced_version = StrictVersion(dl_settings.settings.minimum_client_version)
if forced_version > StrictVersion('0.79.3'):
err = '{} is being forced, exiting.'.format(forced_version)
self.log.error(err)
print(err)
exit()
try:
challenge_url = responses['CHECK_CHALLENGE'].challenge_url
if challenge_url != ' ':
self.g['captchas'] += 1
if conf.CAPTCHA_KEY:
self.log.warning('{} has encountered a CAPTCHA, trying to solve', self.username)
await self.handle_captcha(challenge_url)
else:
raise CaptchaException
except KeyError:
pass
return responses
def travel_speed(self, point):
'''Fast calculation of travel speed to point'''
time_diff = max(time() - self.last_request, self.scan_delay)
distance = get_distance(self.location, point, UNIT)
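        # distance is in the configured unit (miles/km/meters), so the speed
        # below is that unit per hour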
# conversion from seconds to hours
speed = (distance / time_diff) * 3600
return speed
async def bootstrap_visit(self, point):
for _ in range(3):
if await self.visit(point, bootstrap=True):
return True
self.error_code = '?'
self.simulate_jitter(0.00005)
return False
async def visit(self, point, spawn_id=None, bootstrap=False):
"""Wrapper for self.visit_point - runs it a few times before giving up
Also is capable of restarting in case an error occurs.
"""
try:
try:
self.altitude = altitudes.get(point)
except KeyError:
self.altitude = await altitudes.fetch(point)
self.location = point
self.api.set_position(*self.location, self.altitude)
if not self.authenticated:
await self.login()
return await self.visit_point(point, spawn_id, bootstrap)
except ex.NotLoggedInException:
self.error_code = 'NOT AUTHENTICATED'
await sleep(1, loop=LOOP)
if not await self.login(reauth=True):
await self.swap_account(reason='reauth failed')
return await self.visit(point, spawn_id, bootstrap)
except ex.AuthException as e:
self.log.warning('Auth error on {}: {}', self.username, e)
self.error_code = 'NOT AUTHENTICATED'
await sleep(3, loop=LOOP)
await self.swap_account(reason='login failed')
except CaptchaException:
self.error_code = 'CAPTCHA'
self.g['captchas'] += 1
await sleep(1, loop=LOOP)
await self.bench_account()
except CaptchaSolveException:
self.error_code = 'CAPTCHA'
await sleep(1, loop=LOOP)
await self.swap_account(reason='solving CAPTCHA failed')
except ex.TempHashingBanException:
self.error_code = 'HASHING BAN'
self.log.error('Temporarily banned from hashing server for using invalid keys.')
await sleep(185, loop=LOOP)
except ex.BannedAccountException:
self.error_code = 'BANNED'
self.log.warning('{} is banned', self.username)
await sleep(1, loop=LOOP)
await self.remove_account()
except ex.ProxyException as e:
self.error_code = 'PROXY ERROR'
if self.multiproxy:
self.log.error('{} Swapping proxy.', e)
self.swap_proxy()
else:
self.log.error('{}', e)
except ex.TimeoutException as e:
self.log.warning('{} Giving up.', e)
except ex.NianticIPBannedException:
self.error_code = 'IP BANNED'
if self.multiproxy:
self.log.warning('Swapping out {} due to IP ban.', self.api.proxy)
self.swap_proxy()
else:
self.log.error('IP banned.')
except ex.NianticOfflineException as e:
await self.swap_account(reason='Niantic endpoint failure')
self.log.warning('{}. Giving up.', e)
except ex.ServerBusyOrOfflineException as e:
self.log.warning('{} Giving up.', e)
except ex.BadRPCException:
self.error_code = 'BAD REQUEST'
self.log.warning('{} received code 3 and is likely banned. Removing until next run.', self.username)
await self.new_account()
except ex.InvalidRPCException as e:
self.log.warning('{} Giving up.', e)
except ex.ExpiredHashKeyException as e:
self.error_code = 'KEY EXPIRED'
err = str(e)
self.log.error(err)
# print(err)
# exit()
except (ex.MalformedResponseException, ex.UnexpectedResponseException) as e:
self.log.warning('{} Giving up.', e)
self.error_code = 'MALFORMED RESPONSE'
except EmptyGMOException as e:
self.error_code = '0'
self.log.warning('Empty GetMapObjects response for {}. Speed: {:.2f}', self.username, self.speed)
except ex.HashServerException as e:
self.log.warning('{}', e)
self.error_code = 'HASHING ERROR'
except ex.AiopogoError as e:
self.log.exception(e.__class__.__name__)
self.error_code = 'AIOPOGO ERROR'
except CancelledError:
self.log.warning('Visit cancelled.')
except Exception as e:
self.log.exception('A wild {} appeared!', e.__class__.__name__)
self.error_code = 'EXCEPTION'
return False
async def visit_point(self, point, spawn_id, bootstrap,
encounter_conf=conf.ENCOUNTER, notify_conf=conf.NOTIFY,
more_points=conf.MORE_POINTS):
self.handle.cancel()
self.error_code = '?' if bootstrap else '!'
self.log.info('Visiting {0[0]:.4f},{0[1]:.4f}', point)
start = time()
cell_ids = self.get_cell_ids(point)
since_timestamp_ms = (0,) * len(cell_ids)
request = self.api.create_request()
request.get_map_objects(cell_id=cell_ids,
since_timestamp_ms=since_timestamp_ms,
latitude=point[0],
longitude=point[1])
diff = self.last_gmo + self.scan_delay - time()
if diff > 0:
await sleep(diff, loop=LOOP)
responses = await self.call(request)
self.last_gmo = self.last_request
try:
map_objects = responses['GET_MAP_OBJECTS']
if map_objects.status != 1:
                error = 'GetMapObjects status {} for {}. Speed: {:.2f}'.format(
                    map_objects.status, self.username, self.speed)
self.empty_visits += 1
if self.empty_visits > 3:
reason = '{} empty visits'.format(self.empty_visits)
await self.swap_account(reason)
raise ex.UnexpectedResponseException(error)
except KeyError:
await self.random_sleep(.5, 1)
await self.get_player()
raise ex.UnexpectedResponseException('Missing GetMapObjects response.')
pokemon_seen = 0
forts_seen = 0
points_seen = 0
seen_target = not spawn_id
if conf.ITEM_LIMITS and self.bag_items >= self.item_capacity:
await self.clean_bag()
for map_cell in map_objects.map_cells:
request_time_ms = map_cell.current_timestamp_ms
for pokemon in map_cell.wild_pokemons:
pokemon_seen += 1
normalized = self.normalize_pokemon(pokemon)
seen_target = seen_target or normalized['spawn_id'] == spawn_id
if (normalized not in SIGHTING_CACHE and
normalized not in MYSTERY_CACHE):
if ((encounter_conf == 'all'
or (encounter_conf == 'some'
and normalized['pokemon_id'] in conf.ENCOUNTER_IDS))
                            and (self.player_level is not None and (self.player_level < 2 or self.player_level >= 30))):
try:
await self.encounter(normalized, pokemon.spawn_point_id)
except CancelledError:
db_proc.add(normalized)
raise
except Exception as e:
self.log.warning('{} during encounter', e.__class__.__name__)
if notify_conf and self.notifier.eligible(normalized):
if encounter_conf and 'move_1' not in normalized:
try:
await self.encounter(normalized, pokemon.spawn_point_id)
except CancelledError:
db_proc.add(normalized)
raise
except Exception as e:
                            self.log.warning('{} during encounter', e.__class__.__name__)
LOOP.create_task(self.notifier.notify(normalized, map_objects.time_of_day))
if normalized['pokemon_id'] not in conf.TRASH_IDS:
db_proc.add(normalized)
for fort in map_cell.forts:
if not fort.enabled:
continue
forts_seen += 1
if fort.type == 1: # pokestops
if fort.HasField('lure_info'):
norm = self.normalize_lured(fort, request_time_ms)
pokemon_seen += 1
if norm not in SIGHTING_CACHE:
db_proc.add(norm)
elif conf.LURE_ON_DEMAND:
await self.add_lure_pokestop(fort)
if (self.pokestops and
self.bag_items < self.item_capacity
                        and (time() > self.next_spin or (self.player_level is not None and self.player_level < 2))
and (not conf.SMART_THROTTLE or
self.smart_throttle(2))):
cooldown = fort.cooldown_complete_timestamp_ms
if not cooldown or time() > cooldown / 1000:
await self.spin_pokestop(fort)
if fort.id not in FORT_CACHE.pokestops:
pokestop = self.normalize_pokestop(fort)
db_proc.add(pokestop)
else:
if fort not in FORT_CACHE:
request = self.api.create_request()
request.gym_get_info(
gym_id=fort.id,
player_lat_degrees = self.location[0],
player_lng_degrees = self.location[1],
gym_lat_degrees=fort.latitude,
gym_lng_degrees=fort.longitude
)
responses = await self.call(request, action=1.2)
try:
if responses['GYM_GET_INFO'].result != 1:
self.log.warning("Failed to get gym_info {}", fort.id)
else:
gym_get_info = responses['GYM_GET_INFO']
rawFort = {}
rawFort['external_id'] = fort.id
rawFort['name'] = gym_get_info.name
rawFort['lat'] = fort.latitude
rawFort['lon'] = fort.longitude
rawFort['team'] = fort.owned_by_team
rawFort['guard_pokemon_id'] = fort.guard_pokemon_id
rawFort['last_modified'] = fort.last_modified_timestamp_ms // 1000
rawFort['is_in_battle'] = fort.is_in_battle
rawFort['slots_available'] = fort.gym_display.slots_available
rawFort['time_ocuppied'] = fort.gym_display.occupied_millis // 1000
db_proc.add(self.normalize_gym(rawFort))
                            if conf.SCAN_GYM_MEMBERS:
gym_members = gym_get_info.gym_status_and_defenders
for gym_member in gym_members.gym_defender:
raw_member = {}
raw_member['external_id'] = fort.id
raw_member['player_name'] = gym_member.trainer_public_profile.name
raw_member['player_level'] = gym_member.trainer_public_profile.level
raw_member['pokemon_id'] = gym_member.motivated_pokemon.pokemon.pokemon_id
raw_member['pokemon_cp'] = gym_member.motivated_pokemon.pokemon.cp
raw_member['move_1'] = gym_member.motivated_pokemon.pokemon.move_1
raw_member['move_2'] = gym_member.motivated_pokemon.pokemon.move_2
raw_member['individual_attack'] = gym_member.motivated_pokemon.pokemon.individual_attack
raw_member['individual_defense'] = gym_member.motivated_pokemon.pokemon.individual_defense
raw_member['individual_stamina'] = gym_member.motivated_pokemon.pokemon.individual_stamina
raw_member['time_deploy'] = gym_member.motivated_pokemon.deploy_ms // 1000
raw_member['last_modified'] = rawFort['last_modified']
db_proc.add(self.normalize_gym_member(raw_member))
except KeyError:
self.log.warning("Failed to get gym_info {}", fort.id)
if fort.HasField('raid_info'):
fort_raid = {}
fort_raid['external_id'] = fort.id
fort_raid['raid_seed'] = fort.raid_info.raid_seed
fort_raid['raid_battle_ms'] = fort.raid_info.raid_battle_ms
fort_raid['raid_spawn_ms'] = fort.raid_info.raid_spawn_ms
fort_raid['raid_end_ms'] = fort.raid_info.raid_end_ms
fort_raid['raid_level'] = fort.raid_info.raid_level
fort_raid['complete'] = fort.raid_info.complete
fort_raid['pokemon_id'] = None
fort_raid['cp'] = None
fort_raid['move_1'] = None
fort_raid['move_2'] = None
if fort.raid_info.HasField('raid_pokemon'):
fort_raid['pokemon_id'] = fort.raid_info.raid_pokemon.pokemon_id
fort_raid['cp'] = fort.raid_info.raid_pokemon.cp
fort_raid['move_1'] = fort.raid_info.raid_pokemon.move_1
fort_raid['move_2'] = fort.raid_info.raid_pokemon.move_2
normalized_raid = self.normalize_raid(fort_raid)
if normalized_raid not in RAID_CACHE:
db_proc.add(normalized_raid)
if more_points:
try:
for p in map_cell.spawn_points:
points_seen += 1
p = p.latitude, p.longitude
if spawns.have_point(p) or p not in bounds:
continue
spawns.cell_points.add(p)
except KeyError:
pass
if spawn_id:
db_proc.add({
'type': 'target',
'seen': seen_target,
'spawn_id': spawn_id})
if (conf.INCUBATE_EGGS and self.unused_incubators
and self.eggs and self.smart_throttle()):
await self.incubate_eggs()
if pokemon_seen > 0:
self.error_code = ':'
self.total_seen += pokemon_seen
self.g['seen'] += pokemon_seen
self.empty_visits = 0
else:
self.empty_visits += 1
if forts_seen == 0 and not bootstrap:
self.log.warning('Nothing seen by {}. Speed: {:.2f}', self.username, self.speed)
self.error_code = '0 SEEN'
else:
self.error_code = ','
if self.empty_visits > 3 and not bootstrap:
reason = '{} empty visits'.format(self.empty_visits)
await self.swap_account(reason)
self.visits += 1
if conf.MAP_WORKERS:
self.worker_dict.update([(self.worker_no,
(point, start, self.speed, self.total_seen,
self.visits, pokemon_seen))])
self.log.info(
'Point processed, {} Pokemon and {} forts seen!',
pokemon_seen,
forts_seen,
)
self.update_accounts_dict()
self.handle = LOOP.call_later(60, self.unset_code)
return pokemon_seen + forts_seen + points_seen
def smart_throttle(self, requests=1):
try:
# https://en.wikipedia.org/wiki/Linear_equation#Two_variables
            # e.g. with SMART_THROTTLE = 0.05 and maximum = 150: spare = 7.5,
            # usable_per_second = (150 - 7.5) / 60 = 2.375, so the check is
            # hashes_left > 2.375 * seconds_left + 7.5
spare = conf.SMART_THROTTLE * HashServer.status['maximum']
hashes_left = HashServer.status['remaining'] - requests
usable_per_second = (HashServer.status['maximum'] - spare) / 60
seconds_left = HashServer.status['period'] - time()
return hashes_left > usable_per_second * seconds_left + spare
except (TypeError, KeyError):
return False
async def add_lure_pokestop(self, pokestop):
self.error_code = '$'
pokestop_location = pokestop.latitude, pokestop.longitude
distance = get_distance(self.location, pokestop_location)
# permitted interaction distance - 4 (for some jitter leeway)
# estimation of spinning speed limit
if distance > 36 or self.speed > SPINNING_SPEED_LIMIT:
self.error_code = '!'
return False
# randomize location up to ~1.5 meters
self.simulate_jitter(amount=0.00001)
session = SessionManager.get()
if db_proc.lure_to_add(pokestop.id):
db_proc.del_lure_to_add(pokestop.id)
request = self.api.create_request()
self.log.warning('Request add_fort_modifier ITEM_TROY_DISK {} {} {}', pokestop.id, pokestop_location[0], pokestop_location[1])
request.add_fort_modifier(
# modifier_type = ITEM_TROY_DISK,
modifier_type = 501,
fort_id = pokestop.id,
player_latitude = self.location[0],
player_longitude = self.location[1],
)
responses = await self.call(request, action=1.1)
async def spin_pokestop(self, pokestop):
self.error_code = '$'
pokestop_location = pokestop.latitude, pokestop.longitude
distance = get_distance(self.location, pokestop_location)
# permitted interaction distance - 4 (for some jitter leeway)
# estimation of spinning speed limit
if distance > 36 or self.speed > SPINNING_SPEED_LIMIT:
self.error_code = '!'
return False
# randomize location up to ~1.5 meters
self.simulate_jitter(amount=0.00001)
request = self.api.create_request()
request.fort_details(fort_id = pokestop.id,
latitude = pokestop_location[0],
longitude = pokestop_location[1])
responses = await self.call(request, action=1.2)
name = responses['FORT_DETAILS'].name
request = self.api.create_request()
request.fort_search(fort_id = pokestop.id,
player_latitude = self.location[0],
player_longitude = self.location[1],
fort_latitude = pokestop_location[0],
fort_longitude = pokestop_location[1])
responses = await self.call(request, action=2)
try:
result = responses['FORT_SEARCH'].result
except KeyError:
self.log.warning('Invalid Pokéstop spinning response.')
self.error_code = '!'
return
if result == 1:
            self.log.info('Spun {}.', name)
elif result == 2:
self.log.info('The server said {} was out of spinning range. {:.1f}m {:.1f}{}',
name, distance, self.speed, UNIT_STRING)
elif result == 3:
self.log.warning('{} was in the cooldown period.', name)
elif result == 4:
self.log.warning('Could not spin {} because inventory was full. {}',
name, self.bag_items)
self.inventory_timestamp = 0
elif result == 5:
self.log.warning('Could not spin {} because the daily limit was reached.', name)
self.pokestops = False
else:
self.log.warning('Failed spinning {}: {}', name, result)
self.next_spin = time() + conf.SPIN_COOLDOWN
self.error_code = '!'
async def encounter(self, pokemon, spawn_id):
distance_to_pokemon = get_distance(self.location, (pokemon['lat'], pokemon['lon']))
self.error_code = '~'
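        # If the target is too far away, move most of the way toward it so
        # that roughly 47 m remain (just inside the 48 m threshold below);
        # otherwise just jitter in place.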
if distance_to_pokemon > 48:
percent = 1 - (47 / distance_to_pokemon)
lat_change = (self.location[0] - pokemon['lat']) * percent
lon_change = (self.location[1] - pokemon['lon']) * percent
self.location = (
self.location[0] - lat_change,
self.location[1] - lon_change)
self.altitude = uniform(self.altitude - 2, self.altitude + 2)
self.api.set_position(*self.location, self.altitude)
delay_required = min((distance_to_pokemon * percent) / 8, 1.1)
else:
self.simulate_jitter()
delay_required = 1.1
await self.random_sleep(delay_required, delay_required + 1.5)
        request = self.api.create_request()
        request.encounter(encounter_id=pokemon['encounter_id'],
                          spawn_point_id=spawn_id,
                          player_latitude=self.location[0],
                          player_longitude=self.location[1])
responses = await self.call(request, action=2.25)
try:
pdata = responses['ENCOUNTER'].wild_pokemon.pokemon_data
            if self.player_level is not None and self.player_level >= 30:
pokemon['move_1'] = pdata.move_1
pokemon['move_2'] = pdata.move_2
pokemon['individual_attack'] = pdata.individual_attack
pokemon['individual_defense'] = pdata.individual_defense
pokemon['individual_stamina'] = pdata.individual_stamina
pokemon['height'] = pdata.height_m
pokemon['weight'] = pdata.weight_kg
pokemon['cp'] = pdata.cp
pokemon['level'] = calc_pokemon_level(pdata.cp_multiplier)
pokemon['gender'] = pdata.pokemon_display.gender
            elif self.player_level is not None and self.player_level < 2:
request = self.api.create_request()
for item, count in self.items.items():
if item == 1:
if count == 0:
# self.log.warning('Not enough pokeballs to catch pokemon', self.player_level)
                            break
self.log.warning('Player lvl {} Trying to catch pokemon to get exp', self.player_level)
try:
request.catch_pokemon(
encounter_id=pokemon['encounter_id'],
pokeball=1,
normalized_reticle_size=1.950,
spawn_point_id=spawn_id,
hit_pokemon=True,
spin_modifier=0.850,
normalized_hit_position=1.0)
response = await self.call(request, action=1)
try:
catch_pokemon_status = response['CATCH_POKEMON'].status
if catch_pokemon_status == 1:
                                self.log.warning('Pokemon caught :)')
elif catch_pokemon_status == 3:
self.log.warning('Pokemon fled :(')
else:
self.log.warning('Pokemon escaped')
except KeyError:
self.log.error('Missing catch response.')
except ex.BadRPCException:
self.error_code = 'BAD REQUEST'
self.log.warning('{} received code 3 and is likely banned. Removing until next run.', self.username)
await self.new_account()
                    break
except KeyError:
self.log.error('Missing encounter response.')
self.error_code = '!'
async def clean_bag(self):
self.error_code = '|'
rec_items = {}
limits = conf.ITEM_LIMITS
for item, count in self.items.items():
if item in limits and count > limits[item]:
discard = count - limits[item]
if discard > 50:
rec_items[item] = randint(50, discard)
else:
rec_items[item] = discard
removed = 0
for item, count in rec_items.items():
request = self.api.create_request()
request.recycle_inventory_item(item_id=item, count=count)
responses = await self.call(request, action=2)
try:
if responses['RECYCLE_INVENTORY_ITEM'].result != 1:
self.log.warning("Failed to remove item {}", item)
else:
removed += count
except KeyError:
self.log.warning("Failed to remove item {}", item)
self.log.info("Removed {} items", removed)
self.error_code = '!'
async def incubate_eggs(self):
# copy the deque, as self.call could modify it as it updates the inventory
incubators = self.unused_incubators.copy()
for egg in sorted(self.eggs.values(), key=lambda x: x.egg_km_walked_target):
if not incubators:
break
if egg.egg_incubator_id:
continue
inc = incubators.pop()
if inc.item_id == 901 or egg.egg_km_walked_target > 9:
request = self.api.create_request()
request.use_item_egg_incubator(item_id=inc.id, pokemon_id=egg.id)
responses = await self.call(request, action=4.5)
try:
ret = responses['USE_ITEM_EGG_INCUBATOR'].result
if ret == 4:
self.log.warning("Failed to use incubator because it was already in use.")
elif ret != 1:
self.log.warning("Failed to apply incubator {} on {}, code: {}",
inc.id, egg.id, ret)
except (KeyError, AttributeError):
self.log.error('Invalid response to USE_ITEM_EGG_INCUBATOR')
self.unused_incubators = incubators
async def handle_captcha(self, challenge_url):
if self.num_captchas >= conf.CAPTCHAS_ALLOWED:
self.log.error("{} encountered too many CAPTCHAs, removing.", self.username)
raise CaptchaException
self.error_code = 'C'
self.num_captchas += 1
session = SessionManager.get()
if not conf.USE_ANTICAPTCHA:
try:
params = {
'key': conf.CAPTCHA_KEY,
'method': 'userrecaptcha',
'googlekey': '6LeeTScTAAAAADqvhqVMhPpr_vB9D364Ia-1dSgK',
'pageurl': challenge_url,
'json': 1
}
async with session.post('http://2captcha.com/in.php', params=params) as resp:
response = await resp.json(loads=json_loads)
except CancelledError:
raise
except Exception as e:
self.log.error('Got an error while trying to solve CAPTCHA. '
'Check your API Key and account balance.')
raise CaptchaSolveException from e
code = response.get('request')
if response.get('status') != 1:
if code in ('ERROR_WRONG_USER_KEY', 'ERROR_KEY_DOES_NOT_EXIST', 'ERROR_ZERO_BALANCE'):
conf.CAPTCHA_KEY = None
self.log.error('2Captcha reported: {}, disabling CAPTCHA solving', code)
else:
self.log.error("Failed to submit CAPTCHA for solving: {}", code)
raise CaptchaSolveException
try:
# Get the response, retry every 5 seconds if it's not ready
params = {
'key': conf.CAPTCHA_KEY,
'action': 'get',
'id': code,
'json': 1
}
while True:
async with session.get("http://2captcha.com/res.php", params=params, timeout=20) as resp:
response = await resp.json(loads=json_loads)
if response.get('request') != 'CAPCHA_NOT_READY':
break
await sleep(5, loop=LOOP)
except CancelledError:
raise
except Exception as e:
self.log.error('Got an error while trying to solve CAPTCHA. '
'Check your API Key and account balance.')
raise CaptchaSolveException from e
token = response.get('request')
if not response.get('status') == 1:
self.log.error("Failed to get CAPTCHA response: {}", token)
raise CaptchaSolveException
else:
try:
acclient = AnticaptchaClient(conf.CAPTCHA_KEY)
actask = NoCaptchaTaskProxylessTask(challenge_url, '6LeeTScTAAAAADqvhqVMhPpr_vB9D364Ia-1dSgK')
acjob = acclient.createTask(actask)
acjob.join()
token = acjob.get_solution_response()
except AnticatpchaException as e:
self.log.error('AntiCaptcha error: {}, {}', e.error_code, e.error_description)
raise CaptchaException from e
except Exception as e:
self.log.error('Other error from anticaptcha')
raise CaptchaException from e
request = self.api.create_request()
request.verify_challenge(token=token)
await self.call(request, action=4)
await self.update_accounts_dict()
self.log.warning("Successfully solved CAPTCHA")
def simulate_jitter(self, amount=0.00002):
'''Slightly randomize location, by up to ~3 meters by default.'''
        self.location = randomize_point(self.location, amount)
self.altitude = uniform(self.altitude - 1, self.altitude + 1)
self.api.set_position(*self.location, self.altitude)
def update_accounts_dict(self):
self.account['location'] = self.location
self.account['time'] = self.last_request
self.account['inventory_timestamp'] = self.inventory_timestamp
if self.player_level:
self.account['level'] = self.player_level
try:
self.account['auth'] = self.api.auth_provider._access_token
self.account['expiry'] = self.api.auth_provider._access_token_expiry
except AttributeError:
pass
ACCOUNTS[self.username] = self.account
async def remove_account(self):
self.error_code = 'REMOVING'
self.log.warning('Removing {} due to ban.', self.username)
self.account['banned'] = True
self.update_accounts_dict()
await self.new_account()
async def bench_account(self):
self.error_code = 'BENCHING'
self.log.warning('Swapping {} due to CAPTCHA.', self.username)
self.account['captcha'] = True
self.update_accounts_dict()
self.captcha_queue.put(self.account)
await self.new_account()
async def lock_and_swap(self, minutes):
async with self.busy:
self.error_code = 'SWAPPING'
h, m = divmod(int(minutes), 60)
if h:
timestr = '{}h{}m'.format(h, m)
else:
timestr = '{}m'.format(m)
self.log.warning('Swapping {} which had been running for {}.', self.username, timestr)
self.update_accounts_dict()
self.extra_queue.put(self.account)
await self.new_account()
async def swap_account(self, reason=''):
self.error_code = 'SWAPPING'
self.log.warning('Swapping out {} because {}.', self.username, reason)
self.update_accounts_dict()
self.extra_queue.put(self.account)
await self.new_account()
async def new_account(self):
if (conf.CAPTCHA_KEY
and (conf.FAVOR_CAPTCHA or self.extra_queue.empty())
and not self.captcha_queue.empty()):
self.account = self.captcha_queue.get()
else:
try:
self.account = self.extra_queue.get_nowait()
except Empty:
self.account = await run_threaded(self.extra_queue.get)
self.username = self.account['username']
try:
self.location = self.account['location'][:2]
except KeyError:
self.location = get_start_coords(self.worker_no)
        self.player_level = self.account.get('level')
        self.last_request = self.account.get('time', 0)
        self.last_action = self.last_request
        self.last_gmo = self.last_request
        try:
            self.items = self.account['items']
        except KeyError:
            self.account['items'] = {}
            self.items = self.account['items']
        self.bag_items = sum(self.items.values())
        # set after self.items so it reflects the new account, not the old one
        self.inventory_timestamp = self.account.get('inventory_timestamp', 0) if self.items else 0
self.num_captchas = 0
self.eggs = {}
self.unused_incubators = deque()
self.initialize_api()
self.error_code = None
def unset_code(self):
self.error_code = None
@staticmethod
def normalize_pokemon(raw, spawn_int=conf.SPAWN_ID_INT):
"""Normalizes data coming from API into something acceptable by db"""
tsm = raw.last_modified_timestamp_ms
tss = round(tsm / 1000)
tth = raw.time_till_hidden_ms
norm = {
'type': 'pokemon',
'encounter_id': raw.encounter_id,
'pokemon_id': raw.pokemon_data.pokemon_id,
'lat': raw.latitude,
'lon': raw.longitude,
'spawn_id': int(raw.spawn_point_id, 16) if spawn_int else raw.spawn_point_id,
'seen': tss
}
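        # A positive time_till_hidden of at most 90 seconds is trusted as-is;
        # otherwise fall back to the learned despawn time for this spawn, or
        # store the sighting as a 'mystery' if none is known.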
if tth > 0 and tth <= 90000:
norm['expire_timestamp'] = round((tsm + tth) / 1000)
norm['time_till_hidden'] = tth / 1000
norm['inferred'] = False
else:
despawn = spawns.get_despawn_time(norm['spawn_id'], tss)
if despawn:
norm['expire_timestamp'] = despawn
norm['time_till_hidden'] = despawn - tss
norm['inferred'] = True
else:
norm['type'] = 'mystery'
return norm
@staticmethod
def normalize_lured(raw, now):
lure = raw.lure_info
return {
'type': 'pokemon',
'encounter_id': lure.encounter_id,
'pokemon_id': lure.active_pokemon_id,
'expire_timestamp': lure.lure_expires_timestamp_ms // 1000,
'lat': raw.latitude,
'lon': raw.longitude,
'spawn_id': 0 if conf.SPAWN_ID_INT else 'LURED',
'time_till_hidden': (lure.lure_expires_timestamp_ms - now) / 1000,
'inferred': 'pokestop'
}
@staticmethod
def normalize_gym(raw):
return {
'type': 'fort',
'external_id': raw['external_id'],
'name': raw['name'],
'lat': raw['lat'],
'lon': raw['lon'],
'team': raw['team'],
'guard_pokemon_id': raw['guard_pokemon_id'],
'last_modified': raw['last_modified'],
'is_in_battle': raw['is_in_battle'],
'slots_available': raw['slots_available'],
'time_ocuppied': raw['time_ocuppied']
}
@staticmethod
def normalize_gym_member(raw):
return {
'type': 'fort_member',
'external_id' : raw['external_id'],
'player_name' : raw['player_name'],
'player_level' : raw['player_level'],
'pokemon_id' : raw['pokemon_id'],
'pokemon_cp' : raw['pokemon_cp'],
'move_1' : raw['move_1'],
'move_2' : raw['move_2'],
'individual_attack' : raw['individual_attack'],
'individual_defense' : raw['individual_defense'],
'individual_stamina' : raw['individual_stamina'],
'time_deploy' : raw['time_deploy'],
'last_modified' : raw['last_modified']
}
@staticmethod
def normalize_raid(raw):
return {
'type': 'raid',
'external_id': raw['external_id'],
'raid_seed': raw['raid_seed'],
'raid_battle_ms': raw['raid_battle_ms'] // 1000,
'raid_spawn_ms': raw['raid_spawn_ms'] // 1000,
'raid_end_ms': raw['raid_end_ms'] // 1000,
'raid_level': raw['raid_level'],
'complete': raw['complete'],
'pokemon_id': raw['pokemon_id'],
'cp': raw['cp'],
'move_1': raw['move_1'],
'move_2': raw['move_2'],
# 'last_modified': raw.last_modified_timestamp_ms // 1000,
}
@staticmethod
def normalize_pokestop(raw):
return {
'type': 'pokestop',
'external_id': raw.id,
'lat': raw.latitude,
'lon': raw.longitude
}
@staticmethod
async def random_sleep(minimum=10.1, maximum=14, loop=LOOP):
"""Sleeps for a bit"""
await sleep(uniform(minimum, maximum), loop=loop)
@property
def start_time(self):
return self.api.start_time
@property
def status(self):
"""Returns status message to be displayed in status screen"""
if self.error_code:
msg = self.error_code
else:
msg = 'P{seen}'.format(
seen=self.total_seen
)
return '[W{worker_no}: {msg}]'.format(
worker_no=self.worker_no,
msg=msg
)
@property
def authenticated(self):
try:
return self.api.auth_provider.authenticated
except AttributeError:
return False
class HandleStub:
def cancel(self):
pass
class EmptyGMOException(Exception):
"""Raised when the GMO response is empty."""
class CaptchaException(Exception):
"""Raised when a CAPTCHA is needed."""
class CaptchaSolveException(Exception):
"""Raised when solving a CAPTCHA has failed."""
|
sebast1219/Monocle
|
monocle/worker.py
|
Python
|
mit
| 64,053
|
[
"VisIt"
] |
163f78cd063b7c8274ad056b425504a0dbe70f76fe9f3dd8add41078c8b2f533
|
# User registration view.
#
# Bonneville Power Administration Front-End
# Copyright (C) 2015 Shu Ping Chu
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# views.py is the set of functions you implement to describe how to
# display your website's information when a url is visited
from django.conf import settings
from django.core.mail import send_mail
from django.contrib import messages
from django.shortcuts import render
from django.shortcuts import render_to_response # allows rendering a template back to the browser
from django.http import HttpResponse # response to that http
from django.http import HttpResponseRedirect # redirect browser to another url
from django.contrib import auth # take care using user login, logout, password
from django.core.context_processors import csrf # for security protect
from django.template import Context, RequestContext, loader # for template language ex: "{{ }}" <--variable inside
from django.template.loader import get_template # get_template helper function, to know where the templates are
from django.views.generic.base import TemplateView # how display template
from registration.forms import MyRegistrationForm # import from /forms.py
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView, DetailView
from django.contrib.auth import get_user_model
from registration.forms import UserProfileForm
from registration.models import UserProfile
from django.contrib.admin.views.decorators import staff_member_required
# The user registration feature requires an admin login
@staff_member_required
def register_user(request):
if request.method == 'POST':
form = MyRegistrationForm(request.POST)
if form.is_valid():
save_it = form.save(commit=False)
save_it.save()
messages.add_message(request, messages.SUCCESS, "New user " + save_it.username + " was created")
            # here we can add email verification!!!!
            subject = 'Thank you for registering as a BPA user from the BPA project'
            message = 'Hi ' + save_it.username + ', welcome to the BPA web application, we hope you enjoy it!'
            from_email = settings.EMAIL_HOST_USER
            to_list = [save_it.email, settings.EMAIL_HOST_USER]
            send_mail(subject, message, from_email, to_list, fail_silently=False)
            messages.success(request, 'Thank you for registering as a BPA user, a confirmation email was sent')
return HttpResponseRedirect('/registration_success/')
else:
return HttpResponseRedirect('/registration_fail/')
args = {}
args.update(csrf(request))
args['form'] = MyRegistrationForm()
    print args
return render_to_response('registration/register.html', args)
def register_success(request):
return render_to_response('registration/register_success.html')
def register_fail(request):
return render_to_response('registration/register_fail.html')
# @login_required: this decorator automatically checks in the background whether the user
# is logged in; if not, it redirects the user to the login page (forced login)
# Edit user profile
@login_required
def user_profile(request):
if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
if request.method == 'POST': # check if post
        form = UserProfileForm(request.POST, instance=request.user.profile) # take the existing profile and fill it into the form
if form.is_valid():
form.save()
return HttpResponseRedirect('/users/'+ request.user.username)
else:
user = request.user
        profile = user.profile # trigger Django to create a user profile and populate it
form = UserProfileForm(instance=profile)
args = {}
args.update(csrf(request))
args['form'] = form
return render_to_response('registration/profile.html', args)
# Define a user profile detail view
class UserProfileDetailView(DetailView):
model = get_user_model()
slug_field = "username" # paramater "username" as key (aka: pk) for dynamic user url link
template_name = "registration/user_detail.html"
    # always create the user profile before retrieving the object
def get_object(self, queryset=None):
user = super(UserProfileDetailView, self).get_object(queryset)
UserProfile.objects.get_or_create(user=user)
return user
# logout user
def logout(request):
auth.logout(request)
return render_to_response('registration/logout.html')
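# Illustrative URL wiring for these views (a sketch, not part of this file;
# route names and patterns below are assumptions):
#
#     from django.conf.urls import url
#     from registration import views
#
#     urlpatterns = [
#         url(r'^register/$', views.register_user),
#         url(r'^registration_success/$', views.register_success),
#         url(r'^registration_fail/$', views.register_fail),
#         url(r'^profile/$', views.user_profile),
#         url(r'^logout/$', views.logout),
#     ]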
|
jialij-pdx/bpe_capstone
|
registration/views.py
|
Python
|
gpl-2.0
| 5,435
|
[
"VisIt"
] |
3d930860ca5b69ca58a3f8402784d1b4a19a148ea6894b65ae023d5b63c05886
|
# $Id$
#
# Copyright (C) 2003-2008 Greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Definitions for 2D Pharmacophores from:
Gobbi and Poppinger, Biotech. Bioeng. _61_ 47-54 (1998)
"""
from rdkit import Chem
from rdkit.Chem.Pharm2D.SigFactory import SigFactory
from rdkit.Chem import ChemicalFeatures
fdef = """
DefineFeature Hydrophobic [$([C;H2,H1](!=*)[C;H2,H1][C;H2,H1][$([C;H1,H2,H3]);!$(C=*)]),$(C([C;H2,H3])([C;H2,H3])[C;H2,H3])]
Family LH
Weights 1.0
EndFeature
DefineFeature Donor [$([N;!H0;v3]),$([N;!H0;+1;v4]),$([O,S;H1;+0]),$([n;H1;+0])]
Family HD
Weights 1.0
EndFeature
DefineFeature Acceptor [$([O,S;H1;v2]-[!$(*=[O,N,P,S])]),$([O,S;H0;v2]),$([O,S;-]),$([N&v3;H1,H2]-[!$(*=[O,N,P,S])]),$([N;v3;H0]),$([n,o,s;+0]),F]
Family HA
Weights 1.0
EndFeature
DefineFeature AromaticAttachment [$([a;D3](@*)(@*)*)]
Family AR
Weights 1.0
EndFeature
DefineFeature AliphaticAttachment [$([A;D3](@*)(@*)*)]
Family RR
Weights 1.0
EndFeature
DefineFeature UnusualAtom [!#1;!#6;!#7;!#8;!#9;!#16;!#17;!#35;!#53]
Family X
Weights 1.0
EndFeature
DefineFeature BasicGroup [$([N;H2&+0][$([C,a]);!$([C,a](=O))]),$([N;H1&+0]([$([C,a]);!$([C,a](=O))])[$([C,a]);!$([C,a](=O))]),$([N;H0&+0]([C;!$(C(=O))])([C;!$(C(=O))])[C;!$(C(=O))]),$([N,n;X2;+0])]
Family BG
Weights 1.0
EndFeature
DefineFeature AcidicGroup [$([C,S](=[O,S,P])-[O;H1])]
Family AG
Weights 1.0
EndFeature
"""
defaultBins = [(2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 100)]
def _init():
global labels, patts, factory
featFactory = ChemicalFeatures.BuildFeatureFactoryFromString(fdef)
factory = SigFactory(featFactory, minPointCount=2, maxPointCount=3)
factory.SetBins(defaultBins)
factory.Init()
_init()
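if __name__ == '__main__':
    # Minimal usage sketch (assumes a working RDKit install): the module-level
    # `factory` can be handed to rdkit.Chem.Pharm2D.Generate to compute Gobbi
    # 2D pharmacophore fingerprints for a molecule.
    from rdkit.Chem.Pharm2D import Generate
    mol = Chem.MolFromSmiles('OCc1ccccc1CN')
    fp = Generate.Gen2DFingerprint(mol, factory)
    print('bits set: %d' % fp.GetNumOnBits())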
|
jandom/rdkit
|
rdkit/Chem/Pharm2D/Gobbi_Pharm2D.py
|
Python
|
bsd-3-clause
| 1,931
|
[
"RDKit"
] |
f2234b29c70330a63616abd1679a0acbb07a51aefa92c3d54d189a9b813f41ba
|
###############################################################################
# TransientLogSpiralPotential: a transient spiral potential
###############################################################################
import numpy
from ..util import conversion
from .planarPotential import planarPotential
_degtorad= numpy.pi/180.
class TransientLogSpiralPotential(planarPotential):
"""Class that implements a steady-state spiral potential
.. math::
        \\Phi(R,\\phi) = \\frac{\\mathrm{amp}(t)}{\\alpha}\\,\\cos\\left(\\alpha\\,\\ln R - m\\,(\\phi-\\Omega_s\\,t-\\gamma)\\right)
where
.. math::
\\mathrm{amp}(t) = \\mathrm{amp}\\,\\times A\\,\\exp\\left(-\\frac{[t-t_0]^2}{2\\,\\sigma^2}\\right)
"""
def __init__(self,amp=1.,omegas=0.65,A=-0.035,
alpha=-7.,m=2,gamma=numpy.pi/4.,p=None,
sigma=1.,to=0.,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a transient logarithmic spiral potential localized
around to
INPUT:
amp - amplitude to be applied to the potential (default:
1., A below)
gamma - angle between sun-GC line and the line connecting the peak of the spiral pattern at the Solar radius (in rad; default=45 degree; can be Quantity)
           A - amplitude (alpha*potential-amplitude; default=-0.035; can be Quantity)
omegas= - pattern speed (default=0.65; can be Quantity)
m= number of arms
to= time at which the spiral peaks (can be Quantity)
sigma= "spiral duration" (sigma in Gaussian amplitude; can be Quantity)
Either provide:
a) alpha=
b) p= pitch angle (rad; can be Quantity)
OUTPUT:
(none)
HISTORY:
2011-03-27 - Started - Bovy (NYU)
"""
planarPotential.__init__(self,amp=amp,ro=ro,vo=vo)
gamma= conversion.parse_angle(gamma)
p= conversion.parse_angle(p)
A= conversion.parse_energy(A,vo=self._vo)
omegas= conversion.parse_frequency(omegas,ro=self._ro,vo=self._vo)
to= conversion.parse_time(to,ro=self._ro,vo=self._vo)
sigma= conversion.parse_time(sigma,ro=self._ro,vo=self._vo)
self._omegas= omegas
self._A= A
self._m= m
self._gamma= gamma
self._to= to
self._sigma2= sigma**2.
        if p is not None:
self._alpha= self._m/numpy.tan(p)
else:
self._alpha= alpha
self.hasC= True
def _evaluate(self,R,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2011-03-27 - Started - Bovy (NYU)
"""
return self._A*numpy.exp(-(t-self._to)**2./2./self._sigma2)\
/self._alpha*numpy.cos(self._alpha*numpy.log(R)
-self._m*(phi-self._omegas*t-self._gamma))
def _Rforce(self,R,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
return self._A*numpy.exp(-(t-self._to)**2./2./self._sigma2)\
/R*numpy.sin(self._alpha*numpy.log(R)
-self._m*(phi-self._omegas*t-self._gamma))
def _phiforce(self,R,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
return -self._A*numpy.exp(-(t-self._to)**2./2./self._sigma2)\
/self._alpha*self._m*numpy.sin(self._alpha*numpy.log(R)
-self._m*(phi-self._omegas*t
-self._gamma))
def OmegaP(self):
"""
NAME:
OmegaP
PURPOSE:
return the pattern speed
INPUT:
(none)
OUTPUT:
pattern speed
HISTORY:
2011-10-10 - Written - Bovy (IAS)
"""
return self._omegas
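if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of galpy): sample the
    # potential at fixed (R,phi) around t=to to see the Gaussian transient
    # envelope rise toward t=to and decay afterwards.
    sp= TransientLogSpiralPotential(A=-0.035,omegas=0.65,to=2.,sigma=0.5)
    for t in numpy.linspace(0.,4.,9):
        print(sp(1.,phi=0.,t=t))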
|
jobovy/galpy
|
galpy/potential/TransientLogSpiralPotential.py
|
Python
|
bsd-3-clause
| 4,764
|
[
"Gaussian"
] |
75f937be93091fd77d553e815446ee0ca6b5d28a8f58da468d10cfe4922ac28d
|
import numpy as np
import inspect
from cStringIO import StringIO
from _to_native_converter import to_native_class_converter
from _inference_parameter_injector import \
_injectGenericInferenceParameterInterface
from _inference_injector import _injectGenericInferenceInterface
from _misc import defaultAccumulator
import sys
from opengmcore import index_type,value_type,label_type
from abc import ABCMeta, abstractmethod, abstractproperty
from optparse import OptionParser
import inspect
class InferenceBase:
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, gm, accumulator, parameter):
pass
@abstractmethod
def infer(self, visitor):
pass
#@abstractproperty
#def gm(self):
# pass
@abstractmethod
def arg(self, out=None):
pass
#def bound(self, out=None):
# return self.gm.evaluate(self.arg(out))
class ImplementationPack(object):
def __init__(self):
self.implDict = {}
def __hash__(self):
return self.implDict.__hash__()
def _check_consistency(self):
hyperParamsKeywords = None # as ['minStCut']
hyperParamsHelp = None # as ['minStCut implementation for graphcut']
        allowedHyperParams = set()  # e.g. {('push-relabel',), ('kolmogorov',)}
hasInterchangeableParameter = None
# loop over all allowedHyperParams
implDict = self.implDict
        for semiRingDict in implDict.values():
            hyperParameters = None
            # loop over all semi rings
            for algClass, paramClass in semiRingDict.values():
                hp = algClass._hyperParameters()
                # check if the hyper parameter (as push-relabel)
                # is the same for all semi-rings
                if hyperParameters is not None and hp != hyperParameters:
                    raise RuntimeError("inconsistency in hyperParameters of %s"
                                       % algClass._algName())
                hyperParameters = hp
allowedHyperParams.add(hyperParameters)
hpK = algClass._hyperParameterKeywords()
hpH = algClass._hyperParametersHelp()
icp = algClass._hasInterchangeableParameter()
if hasInterchangeableParameter is not None:
assert (icp == hasInterchangeableParameter)
else:
hasInterchangeableParameter = icp
# check if the hyper parameter keywords are the same for all
# algorithms within the implementation pack
if (hyperParamsKeywords is not None
and hyperParamsHelp is not None):
if hpK != hyperParamsKeywords:
raise RuntimeError("inconsistency in hyperParamsKeywords of %s"
                                           % algClass._algName())
if hpH != hyperParamsHelp:
raise RuntimeError("inconsistency in hyperParamsHelp of %s"
                                           % algClass._algName())
else:
hyperParamsKeywords = hpK
hyperParamsHelp = hpH
if len(hyperParamsKeywords) != len(hyperParamsHelp):
raise RuntimeError("inconsistency in hyperParamsHelp and "
"hyperParamsKeywords of %s"
                                       % algClass._algName())
    @property
    def allowedHyperParameters(self):
        allowedHyperParams = set()  # e.g. {('push-relabel',), ('kolmogorov',)}
        implDict = self.implDict
        for hyperParameters in implDict.keys():
            allowedHyperParams.add(hyperParameters)
        return allowedHyperParams
    @property
    def hasHyperParameters(self):
        return len(self.hyperParameterKeywords) != 0
    @property
    def hyperParameterKeywords(self):
        try:
            return dictDictElement(self.implDict)[0]._hyperParameterKeywords()
        except:
            raise RuntimeError(dictDictElement(self.implDict))
    @property
    def hyperParametersDoc(self):
        return dictDictElement(self.implDict)[0]._hyperParametersDoc()
    @property
    def hyperParameters(self):
        return dictDictElement(self.implDict)[0]._hyperParameters()
    @property
    def hasInterchangeableParameter(self):
        return dictDictElement(self.implDict)[0]._hasInterchangeableParameter()
    @property
    def anyParameterClass(self):
        return dictDictElement(self.implDict)[1]
def classGenerator(
classname,
inferenceClasses,
defaultHyperParams,
exampleClass,
):
""" generates a high level class for each BASIC inference algorithm:
There will be One class For Bp regardless what the operator
and accumulator is .
Also all classes with addidional templates lie
GraphCut<PushRelabel> and GraphCut<komolgorov> will glued
together to one class GraphCut
"""
#print "className ",classname
members = inspect.getmembers(exampleClass, predicate=inspect.ismethod)
def inference_init(self, gm, accumulator=None, parameter=None):
# self._old_init()
# set up basic properties
self.gm = gm
self.operator = gm.operator
if accumulator is None:
self.accumulator = defaultAccumulator(gm)
else:
self.accumulator = accumulator
self._meta_parameter = parameter
# get hyper parameter (as minStCut for graphcut, or the subsolver for
# dualdec.)
hyperParamKeywords = self._infClasses.hyperParameterKeywords
numHyperParams = len(hyperParamKeywords)
userHyperParams = [None]*numHyperParams
collectedHyperParameters = 0
# get the users hyper parameter ( if given)
if(self._meta_parameter is not None):
for hpIndex, hyperParamKeyword in enumerate(hyperParamKeywords):
if hyperParamKeyword in self._meta_parameter.kwargs:
userHyperParams[hpIndex] = self._meta_parameter.kwargs.pop(
hyperParamKeyword)
collectedHyperParameters += 1
        # check if ZERO or ALL hyperParameters have been collected
if collectedHyperParameters != 0 and collectedHyperParameters != numHyperParams:
raise RuntimeError("All or none hyper-parameter must be given")
# check if the WHOLE tuple of hyperParameters is allowed
if collectedHyperParameters != 0:
if tuple(str(x) for x in userHyperParams) not in inferenceClasses.implDict:
raise RuntimeError("%s is not an allowed hyperParameter\nAllowed hyperParameters are %s" % (
repr(userHyperParams), repr(inferenceClasses.implDict.keys())))
else:
userHyperParams = defaultHyperParams
try:
# get the selected inference class and the parameter
if(numHyperParams == 0):
self._selectedInfClass, self._selectedInfParamClass = inferenceClasses.implDict[
"__NONE__"][(self.operator, self.accumulator)]
else:
hp = tuple(str(x) for x in userHyperParams)
self._selectedInfClass, self._selectedInfParamClass = inferenceClasses.implDict[
hp][(self.operator, self.accumulator)]
except:
dictStr=str(inferenceClasses.implDict)
raise RuntimeError("given seminring (operator = %s ,accumulator = %s) is not implemented for this solver\n %s" % \
(self.operator, self.accumulator,dictStr))
if self._meta_parameter is None:
self.parameter = self._selectedInfClass._parameter()
self.parameter.set()
else:
self.parameter = to_native_class_converter(
givenValue=self._meta_parameter, nativeClass=self._selectedInfParamClass)
assert self.parameter is not None
self.inference = self._selectedInfClass(self.gm, self.parameter)
@classmethod
def get_cpp_parameter(cls, operator, accumulator, parameter):
_meta_parameter = parameter
# get hyper parameter (as minStCut for graphcut, or the subsolver for
# dualdec.)
hyperParamKeywords = inferenceClasses.hyperParameterKeywords
numHyperParams = len(hyperParamKeywords)
userHyperParams = [None]*numHyperParams
collectedHyperParameters = 0
# get the users hyper parameter ( if given)
if(_meta_parameter is not None):
for hpIndex, hyperParamKeyword in enumerate(hyperParamKeywords):
if hyperParamKeyword in _meta_parameter.kwargs:
userHyperParams[hpIndex] = _meta_parameter.kwargs.pop(
hyperParamKeyword)
collectedHyperParameters += 1
        # check if ZERO or ALL hyperParameters have been collected
if collectedHyperParameters != 0 and collectedHyperParameters != numHyperParams:
raise RuntimeError("All or none hyper-parameter must be given")
# check if the WHOLE tuple of hyperParameters is allowed
if collectedHyperParameters != 0:
if tuple(str(x) for x in userHyperParams) not in inferenceClasses.implDict:
raise RuntimeError("%s is not an allowed hyperParameter\nAllowed hyperParameters are %s" % (
repr(userHyperParams), repr(inferenceClasses.implDict.keys())))
else:
userHyperParams = defaultHyperParams
#try:
# get the selected inference class and the parameter
if(numHyperParams == 0):
_selectedInfClass, _selectedInfParamClass = inferenceClasses.implDict[
"__NONE__"][(operator, accumulator)]
else:
hp = tuple(str(x) for x in userHyperParams)
_selectedInfClass, _selectedInfParamClass = inferenceClasses.implDict[
hp][(operator, accumulator)]
#except:
# dictStr=str(inferenceClasses.implDict)
# raise RuntimeError("given seminring (operator = %s ,accumulator = %s) is not implemented for this solver\n %s" % \
# (operator, accumulator,dictStr))
if _meta_parameter is None:
            cppParam = _selectedInfClass._parameter()
cppParam.set()
else:
cppParam = to_native_class_converter(
givenValue=_meta_parameter, nativeClass=_selectedInfParamClass)
assert cppParam is not None
return cppParam
def verboseVisitor(self, printNth=1, multiline=True):
""" factory function to get a verboseVisitor:
A verboseVisitor will print some information while inference is running
**Args**:
printNth : call the visitor in each nth visit (default : ``1``)
multiline : print the information in multiple lines or in one line (default: ``True``)
**Notes**:
The usage of a verboseVisitor can slow down inference a bit
"""
return self.inference.verboseVisitor(printNth, multiline)
def timingVisitor(self, visitNth=1,reserve=0,verbose=True, multiline=True,timeLimit=float('inf')):
""" factory function to get a verboseVisitor:
A verboseVisitor will print some information while inference is running
**Args**:
visitNth : call the python visitor in each nth visit (default : ``1``)
            reserve : reserve space for bounds, values, times, and iterations (default: ``0``)
verbose : print information (default ``True``)
multiline : print the information in multiple lines or in one line (default: ``True``)
**Notes**:
The usage of a timingVisitor can slow down inference a bit
"""
return self.inference.timingVisitor(visitNth=visitNth,reserve=reserve,verbose=verbose, multiline=multiline,timeLimit=timeLimit)
def pythonVisitor(self, callbackObject, visitNth):
""" factory function to get a pythonVisitor:
A python visitor can callback to pure python within the c++ inference
**Args**:
callbackObject : python function ( or class with implemented ``__call__`` function)
visitNth : call the python function in each nth visit (default : 1)
**Notes**:
The usage of a pythonVisitor can slow down inference
"""
return self.inference.pythonVisitor(callbackObject, visitNth)
def infer(self, visitor=None, releaseGil=True):
""" start the inference
**Args**:
visitor : run inference with an optional visitor (default : None)
**Notes**:
            a call to infer will release the GIL (when ``releaseGil`` is True)
"""
assert self.inference is not None
return self.inference.infer(visitor=visitor, releaseGil=releaseGil)
def arg(self, returnAsVector=False, out=None):
""" get the result of the inference
**Args**:
returnAsVector : return the result as ``opengm.LabelVector`` (default : ``False``)
To get a numpy ndarray ignore this argument or set it to ``False``
out : ``if returnAsVector==True`` a preallocated ``opengm.LabelVector`` can be passed to this function
"""
return self.inference.arg(out=out, returnAsVector=returnAsVector)
def partialOptimality(self):
"""get a numpy array of booleans which are true where the variables are optimal
"""
return self.inference.partialOptimality()
def setStartingPoint(self, labels):
""" set a starting point / start labeling
**Args**:
labels : starting point labeling
"""
numpyLabels=np.require(labels,dtype=label_type)
self.inference.setStartingPoint(numpyLabels)
def bound(self):
""" get the bound"""
return self.inference.bound()
def value(self):
""" get the value of inference.
The same as ``gm.evaluate(inf.arg())``
"""
return self.inference.value()
def reset(self):
"""
        reset an inference solver (the structure of the gm must not change)
"""
return self.inference.reset()
def marginals(self,vis):
"""get the marginals for a subset of variable indices
Args:
vis : variable indices (for highest performance use a numpy.ndarray with ``opengm.index_type`` as dtype)
Returns :
a 2d numpy.ndarray where the first axis iterates over the variables passed by ``vis``
Notes :
All variables in ``vis`` must have the same number of labels
"""
return self.inference.marginals(vis)
def factorMarginals(self,fis):
"""get the marginals for a subset of variable indices
Args:
fis : factor indices (for highest performance use a numpy.ndarray with ``opengm.index_type`` as dtype)
Returns :
a N-d numpy.ndarray where the first axis iterates over the factors passed by ``fis``
Notes :
All factors in ``fis`` must have the same number of variables and shape
"""
return self.inference.factorMarginals(fis)
def addConstraint(self, lpVariableIndices, coefficients, lowerBound, upperBound):
"""
Add a constraint to the lp
**Args** :
lpVariableIndices : variable indices w.r.t. the lp
coefficients : coefficients of the constraint
lowerBound : lowerBound of the constraint
upperBound : upperBound of the constraint
"""
self.inference.addConstraint(
lpVariableIndices, coefficients, lowerBound, upperBound)
def addConstraints(self, lpVariableIndices, coefficients, lowerBounds, upperBounds):
"""
Add constraints to the lp
**Args** :
lpVariableIndices : variable indices w.r.t. the lp
coefficients : coefficients of the constraints
lowerBounds : lowerBounds of the constraints
upperBounds : upperBounds of the constraints
"""
self.inference.addConstraints(
lpVariableIndices, coefficients, lowerBounds, upperBounds)
def getEdgeLabeling(self):
return self.inference.getEdgeLabeling()
def lpNodeVariableIndex(self, variableIndex, label):
"""
get the lp variable index from a gm variable index and the label
**Args**:
variableIndex : variable index w.r.t. the graphical model
label : label of the variable
**Returns**:
variableIndex w.r.t. the lp
"""
return self.inference.lpNodeVariableIndex(variableIndex, label)
def lpFactorVariableIndex(self, factorIndex, labels):
"""
get the lp factor index from a gm variable index and the labeling (or the scalar index of the labeling)
**Args**:
factorIndex : factor index w.r.t. the graphical model
labels : labeling of the factor (or a scalar index of the labeling)
**Returns**:
variableIndex w.r.t. the lp of the factor (and it's labeling )
"""
return self.inference.lpFactorVariableIndex(factorIndex, labels)
def generateParamHelp():
# simple parameter
if not inferenceClasses.hasHyperParameters:
# get any parameter of this impl pack
exampleParam = inferenceClasses.anyParameterClass()
exampleParam.set()
paramHelp = exampleParam._str_spaced_()
return paramHelp
# with hyper parameter(s)
else:
# the C++ parameter does NOT CHANGE if hyper parameters change
if inferenceClasses.hasInterchangeableParameter:
# get any parameter of this impl pack
exampleParam = inferenceClasses.anyParameterClass()
exampleParam.set()
paramHelp = exampleParam._str_spaced_()
# append hyper parameter(s)
# print to string!!!
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
# loop over all hp Keywords (usually there is max. 1 hyper parameter)
# (should it be allowed to use more than 1 hp??? right now it is!)
assert len(inferenceClasses.hyperParameterKeywords) == 1
hyperParameterKeyword = inferenceClasses.hyperParameterKeywords[0]
hyperParameterDoc = inferenceClasses.hyperParametersDoc[0]
print " * %s : %s" % (hyperParameterKeyword, hyperParameterDoc)
            # loop over all hyperparameters
for hyperParameters in inferenceClasses.implDict.keys():
hyperParameter = hyperParameters[0]
# get an example for this hyperparameter class
classes = inferenceClasses.implDict[hyperParameters]
# get any semi ring solver
[solverC, paramC] = dictElement(classes)
assert len(hyperParameters) == 1
if(solverC._isDefault()):
print " - ``'%s'`` (default)\n" % (hyperParameter,)
else:
print " - ``'%s'``\n" % (hyperParameter,)
sys.stdout = old_stdout
hyperParamHelp = mystdout.getvalue()
return paramHelp + "\n\n" + hyperParamHelp
# the C++ parameter DOES CHANGE if hyper parameters change
else:
# print to string!!!
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
print "The parameter object of has internal dependencies:\n\n"
assert len(inferenceClasses.hyperParameterKeywords) == 1
hyperParameterKeyword = \
inferenceClasses.hyperParameterKeywords[0]
hyperParameterDoc = inferenceClasses.hyperParametersDoc[0]
print(" * %s : %s"
% (hyperParameterKeyword, hyperParameterDoc))
            # loop over all hyperparameters
for hyperParameters in inferenceClasses.implDict.keys():
hyperParameter = hyperParameters[0]
# get an example for this hyperparameter class
classes = inferenceClasses.implDict[hyperParameters]
# get any semi ring solver
[solverC, paramC] = dictElement(classes)
assert len(hyperParameters) == 1
if(solverC._isDefault()):
print(" - ``'%s'`` (default)\n"
% (hyperParameter,))
else:
print(" - ``'%s'``\n"
% (hyperParameter,))
for hyperParameters in inferenceClasses.implDict.keys():
hyperParameter = hyperParameters[0]
# get an example for this hyperparameter class
classes = inferenceClasses.implDict[hyperParameters]
# get any semi ring solver
[solverC, paramC] = dictElement(classes)
hyperParameterKeywords = solverC._hyperParameterKeywords()
hyperParameters = solverC._hyperParameters()
assert len(hyperParameterKeywords) == 1
assert len(hyperParameters) == 1
hyperParameterKeyword = hyperParameterKeywords[0]
hyperParameter = hyperParameters[0]
print(" ``if %s == %s`` : \n\n"
% (hyperParameterKeyword, hyperParameter))
exampleParam = paramC()
exampleParam.set()
print exampleParam._str_spaced_(' ')
sys.stdout = old_stdout
return mystdout.getvalue()
# exampleClass
memberDict = {
# public members
'__init__': inference_init,
'infer': infer,
'arg': arg,
'bound': bound,
'value': value,
'setStartingPoint': setStartingPoint,
#
'gm': None,
'operator': None,
'accumulator': None,
'inference': None,
'parameter': None,
# 'protected' members
'_meta_parameter': None,
'_infClasses': inferenceClasses,
'_selectedInfClass': None,
'_selectedInfParamClass': None
}
def _generateFunction_(function,fname):
def _f_(self,*args,**kwargs):
attr = getattr(self.inference, fname)
return attr(*args,**kwargs)
_f_.__doc__=function.__doc__
return _f_
for m in members:
if m[0].startswith('_') or m[0].endswith('_') :
pass
else :
memberDict[m[0]]=_generateFunction_(m[1],m[0])
"""
if hasattr(exampleClass, "reset"):
memberDict['reset'] = reset
if hasattr(exampleClass, "verboseVisitor"):
memberDict['verboseVisitor'] = verboseVisitor
if hasattr(exampleClass, "timingVisitor"):
memberDict['timingVisitor'] = timingVisitor
if hasattr(exampleClass, "pythonVisitor"):
memberDict['pythonVisitor'] = pythonVisitor
if hasattr(exampleClass, "marginals") and hasattr(exampleClass, "factorMarginals"):
memberDict['marginals'] = marginals
memberDict['factorMarginals'] = factorMarginals
if hasattr(exampleClass, "addConstraint") and hasattr(exampleClass, "addConstraints"):
memberDict['addConstraints'] = addConstraints
memberDict['addConstraint'] = addConstraint
memberDict['lpNodeVariableIndex'] = lpNodeVariableIndex
memberDict['lpFactorVariableIndex'] = lpFactorVariableIndex
if hasattr(exampleClass, "partialOptimality") :
memberDict['partialOptimality'] = partialOptimality
if hasattr(exampleClass, "getEdgeLabeling") :
memberDict['getEdgeLabeling'] = getEdgeLabeling
"""
infClass = type(classname, (InferenceBase,), memberDict)
infClass.__init__ = inference_init
infClass.get_cpp_parameter = get_cpp_parameter
# print to string!!!
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
print """ %s is a %s inference algorithm
**Args** :
        gm : the graphical model to infer / optimize
accumulator : accumulator used for inference can be:
-``'minimizer'`` (default : ``if gm.operator is 'adder'==True:``)
-``'maximizer'`` (default : ``if gm.operator is 'multiplier'==True:``)
-``'integrator'``
            Not every accumulator can be used with every solver.
            Which accumulators can be used will be documented soon.
parameter : parameter object of the solver
""" % (exampleClass._algName(), exampleClass._algType())
print """
**Parameter** :
%s
""" % (generateParamHelp(),)
if(exampleClass._examples() != ''):
print """ **Examples**: ::
%s
""" % (exampleClass._examples() .replace("\n", "\n "),)
if(exampleClass._guarantees() != ''):
print """ **Guarantees** :
%s
""" % (exampleClass._guarantees(),)
if(exampleClass._limitations() != ''):
print """ **Limitations** :
%s
""" % (exampleClass._limitations(),)
if(exampleClass._cite() != ''):
print """ **Cite** :
%s
""" % (exampleClass._cite().replace("\n\n", "\n\n "),)
if(exampleClass._dependencies() != ''):
print """ **Dependencies** :
%s
""" % (exampleClass._dependencies(),)
if(exampleClass._notes() != ''):
print """ **Notes** :
%s
""" % (exampleClass._notes().replace("\n\n", "\n\n "),)
sys.stdout = old_stdout
infClass.__dict__['__init__'].__doc__ = mystdout.getvalue()
return infClass, classname
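# To illustrate the pattern classGenerator relies on (a simplified sketch,
# not opengm API): member functions are collected in a dict and the class
# is created dynamically with type(), e.g.
#
#     def _infer(self, visitor=None):
#         return self.inference.infer(visitor=visitor)
#     MySolver = type('MySolver', (InferenceBase,), {'infer': _infer})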
def dictElement(aDict):
return aDict.itervalues().next()
def dictDictElement(dictDict):
return dictElement(dictElement(dictDict))
def _inject_interface(solverDicts):
algs = dict()
algDefaultHyperParams = dict()
exampleClasses = dict()
for solverDict, op, acc in solverDicts:
semiRing = (op, acc)
        # inject raw interface to parameters and subparameters
try:
paramDict = solverDict['parameter'].__dict__
except:
raise RuntimeError(repr(solverDict))
for key in paramDict:
paramClass = paramDict[key]
if inspect.isclass(paramClass):
_injectGenericInferenceParameterInterface(
paramClass, infParam=not key.startswith('_SubParameter'),
subInfParam=key.startswith('_SubParameter'))
for key in solverDict:
elementInDict = solverDict[key]
if (inspect.isclass(elementInDict) and not key.endswith('Visitor')
and hasattr(elementInDict, '_algName')
and hasattr(elementInDict, '_parameter')):
solverClass = elementInDict
param = solverClass._parameter()
paramClass = param.__class__
# inject raw interface to inference
_injectGenericInferenceInterface(solverClass)
# Get Properties to group algorithm
algName = solverClass._algName()
hyperParamKeywords = [str(
x) for x in solverClass._hyperParameterKeywords()]
hyperParameters = tuple(str(
x) for x in solverClass._hyperParameters())
assert hyperParamKeywords is not None
exampleClasses[algName] = solverClass
# algs['GraphCut']
if algName in algs:
metaAlgs = algs[algName]
else:
implPack = ImplementationPack()
algs[algName] = implPack
metaAlgs = algs[algName]
if(len(hyperParameters) == 0):
if '__NONE__' in metaAlgs.implDict:
semiRingAlgs = metaAlgs.implDict["__NONE__"]
else:
metaAlgs.implDict["__NONE__"] = dict()
semiRingAlgs = metaAlgs.implDict["__NONE__"]
else:
if hyperParameters in metaAlgs.implDict:
                        semiRingAlgs = metaAlgs.implDict[hyperParameters]
else:
metaAlgs.implDict[hyperParameters] = dict()
semiRingAlgs = metaAlgs.implDict[hyperParameters]
semiRingAlgs[semiRing] = (solverClass, paramClass)
if(len(hyperParameters) == 0):
metaAlgs.implDict["__NONE__"] = semiRingAlgs
else:
metaAlgs.implDict[hyperParameters] = semiRingAlgs
algs[algName] = metaAlgs
# check if this implementation is the default
if solverClass._isDefault():
algDefaultHyperParams[algName] = hyperParameters
result = []
# generate high level interface
for algName in algs.keys():
a = algs[algName]
adhp = algDefaultHyperParams[algName]
ec = exampleClasses[algName]
result.append(classGenerator(algName, a, adhp, ec))
return result
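# Hypothetical driver sketch (module names below are assumptions, not
# opengm API): _inject_interface is fed per-semiring solver dicts and the
# generated classes are exported into a public namespace.
#
#     solverDicts = [(adder_min_module.__dict__, 'adder', 'minimizer'),
#                    (adder_max_module.__dict__, 'adder', 'maximizer')]
#     for infClass, className in _inject_interface(solverDicts):
#         setattr(inference_namespace, className, infClass)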
|
ilastikdev/opengm
|
src/interfaces/python/opengm/_inference_interface_generator.py
|
Python
|
mit
| 29,835
|
[
"VisIt"
] |
784766aec1171970edb539ba9b6ee4d94180545475548cdcda3c14fbe699776b
|
import vtk
import time
import numpy as np
from ddapp.timercallback import TimerCallback
class OrbitController(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.view = view
self.orbitTime = 20.0
def tick(self):
speed = 360.0 / self.orbitTime
degrees = self.elapsed * speed
self.view.camera().Azimuth(degrees)
self.view.render()
class Flyer(TimerCallback):
def __init__(self, view):
TimerCallback.__init__(self)
self.view = view
self.flyTime = 0.5
self.startTime = 0.0
self.maintainViewDirection = False
self.positionZoom = 0.7
def getCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def zoomTo(self, newFocalPoint, newPosition=None):
self.interp = vtk.vtkCameraInterpolator()
self.interp.AddCamera(0.0, self.getCameraCopy())
c = self.getCameraCopy()
newFocalPoint = np.array(newFocalPoint)
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
if newPosition is None:
if self.maintainViewDirection:
newPosition = oldPosition + (newFocalPoint - oldFocalPoint)
else:
newPosition = oldPosition
newPosition += self.positionZoom*(newFocalPoint - newPosition)
#newPosition = newFocalPoint - self.positionZoom*(newFocalPoint - newPosition)
c.SetFocalPoint(newFocalPoint)
c.SetPosition(newPosition)
c.SetViewUp([0.0, 0.0, 1.0])
self.interp.AddCamera(1.0, c)
self.startTime = time.time()
self.start()
def tick(self):
elapsed = time.time() - self.startTime
t = (elapsed / float(self.flyTime)) if self.flyTime > 0 else 1.0
self.interp.InterpolateCamera(t, self.view.camera())
self.view.render()
if t >= 1.0:
return False
class RobotModelFollower(object):
def __init__(self, view, robotModel, jointController):
self.view = view
self.robotModel = robotModel
self.jointController = jointController
self.followAxes = [True, True, True]
self.callbackId = None
def start(self):
self.callbackId = self.robotModel.connectModelChanged(self.onModelChanged)
self.lastTrackPosition = np.array(self.jointController.q[:3])
def stop(self):
self.robotModel.disconnectModelChanged(self.callbackId)
def getCameraCopy(self):
camera = vtk.vtkCamera()
camera.DeepCopy(self.view.camera())
return camera
def onModelChanged(self, model):
newTrackPosition = np.array(self.jointController.q[:3])
delta = newTrackPosition - self.lastTrackPosition
for i in xrange(3):
if not self.followAxes[i]:
delta[i] = 0.0
self.lastTrackPosition = newTrackPosition
c = self.view.camera()
oldFocalPoint = np.array(c.GetFocalPoint())
oldPosition = np.array(c.GetPosition())
c.SetFocalPoint(oldFocalPoint + delta)
c.SetPosition(oldPosition + delta)
self.view.render()
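# Minimal usage sketch (illustrative; `view` is a hypothetical ddapp view):
#
#     orbit = OrbitController(view)
#     orbit.orbitTime = 10.0   # one revolution every 10 seconds
#     orbit.start()            # start()/stop() come from TimerCallback
#
#     flyer = Flyer(view)
#     flyer.zoomTo([1.0, 0.0, 0.0])   # fly the camera toward a new focal point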
|
gizatt/director
|
src/python/ddapp/cameracontrol.py
|
Python
|
bsd-3-clause
| 3,247
|
[
"VTK"
] |
e1d48274407bf932b5ec7078f74401ebe2e48221d2616dc8016aa9fbdd628076
|
# Standard library imports.
from os.path import isfile
# Enthought library imports.
from enthought.pyface import FileDialog, OK
# Local imports
from enthought.mayavi.core.common import error
from enthought.mayavi.action.common import WorkbenchAction, get_imayavi
######################################################################
# `OpenCitcomSVtkFile` class.
######################################################################
class OpenCitcomSVTKFILE(WorkbenchAction):
""" An action that opens a new VTK file. """
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self):
""" Performs the action. """
wildcard = 'VTK files (*.vtk)|*.vtk|' + FileDialog.WILDCARD_ALL
parent = self.window.control
dialog = FileDialog(parent=parent,
title='Open CitcomS VTK file',
action='open', wildcard=wildcard
)
if dialog.open() == OK:
if not isfile(dialog.path):
error("File '%s' does not exist!"%dialog.path, parent)
return
from enthought.mayavi.plugins.CitcomS_vtk_file_reader import CitcomSVTKFileReader
r = CitcomSVTKFileReader()
r.initialize(dialog.path)
mv = get_imayavi(self.window)
mv.add_source(r)
######################################################################
# `OpenCitcomSHDFFILE` class.
######################################################################
class OpenCitcomSHDFFILE(WorkbenchAction):
""" An action that opens a new VTK file. """
###########################################################################
# 'Action' interface.
###########################################################################
def perform(self):
""" Performs the action. """
wildcard = 'HDF files (*.h5)|*.h5|' + FileDialog.WILDCARD_ALL
parent = self.window.control
dialog = FileDialog(parent=parent,
title='Open CitcomS H5 file',
action='open', wildcard=wildcard
)
if dialog.open() == OK:
if not isfile(dialog.path):
error("File '%s' does not exist!"%dialog.path, parent)
return
from enthought.mayavi.plugins.CitcomS_hdf_file_reader import CitcomSHDFFileReader
r = CitcomSHDFFileReader()
r.initialize(dialog.path)
mv = get_imayavi(self.window)
mv.add_source(r)
|
geodynamics/citcoms
|
visual/Mayavi2/original_plugins/plugins/OpenCitcomSFILES.py
|
Python
|
gpl-2.0
| 2,756
|
[
"Mayavi",
"VTK"
] |
3e0f4e5df63f837a62d54650771bee9c9891fcb49fb359ed4bcac69826e6c1ac
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
# !! This is the configuration of Nikola. !! #
# !! You should edit it to your liking. !! #
# ! Some settings can be different in different languages.
# ! A comment stating (translatable) is used to denote those.
# ! There are two ways to specify a translatable setting:
# ! (a) BLOG_TITLE = "My Blog"
# ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"}
# ! Option (a) is used when you don't want that setting translated.
# ! Option (b) is used for settings that are different in different languages.
# Data about this site
BLOG_AUTHOR = "Sef Kloninger" # (translatable)
BLOG_TITLE = "sef.kloninger.com" # (translatable)
# This is the main URL for your site. It will be used
# in a prominent link
SITE_URL = "http://sef.kloninger.com/"
# This is the URL where nikola's output will be deployed.
# If not set, defaults to SITE_URL
# BASE_URL = "http://sef.kloninger.com"
BLOG_EMAIL = "sefklon@gmail.com"
BLOG_DESCRIPTION = "None"
# Nikola is multilingual!
#
# Currently supported languages are:
#
# en English
# ar Arabic
# bg Bulgarian
# ca Catalan
# cs Czech [ALTERNATIVELY cz]
# da Danish
# de German
# el Greek [NOT gr]
# eo Esperanto
# es Spanish
# et Estonian
# eu Basque
# fa Persian
# fi Finnish
# fr French
# hi Hindi
# hr Croatian
# id Indonesian
# it Italian
# ja Japanese [NOT jp]
# ko Korean
# nb Norwegian Bokmål
# nl Dutch
# pl Polish
# pt_br Portuguese (Brasil)
# ru Russian
# sk Slovak
# sl Slovene
# sr Serbian (Cyrillic)
# sv Swedish
# tr Turkish [NOT tr_TR]
# ur Urdu
# zh_cn Chinese (Simplified)
#
# If you want to use Nikola with a non-supported language you have to provide
# a module containing the necessary translations
# (cf. the modules at nikola/data/themes/base/messages/).
# If a specific post is not translated to a language, then the version
# in the default language will be shown instead.
# What is the default language?
DEFAULT_LANG = "en"
# What other languages do you have?
# The format is {"translationcode" : "path/to/translation" }
# the path will be used as a prefix for the generated pages location
TRANSLATIONS = {
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}
# What will translated input files be named like?
# If you have a page something.rst, then something.pl.rst will be considered
# its Polish translation.
# (in the above example: path == "something", ext == "rst", lang == "pl")
# this pattern is also used for metadata:
# something.meta -> something.pl.meta
TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}"
# Links for the sidebar / navigation bar. (translatable)
# This is a dict. The keys are languages, and values are tuples.
#
# For regular links:
# ('http://getnikola.com/', 'Nikola Homepage')
#
# For submenus:
# (
# (
# ('http://apple.com/', 'Apple'),
# ('http://orange.com/', 'Orange'),
# ),
# 'Fruits'
# )
#
# WARNING: Support for submenus is theme-dependent.
# Only one level of submenus is supported.
# WARNING: Some themes, including the default Bootstrap 3 theme,
# may present issues if the menu is too large.
# (in bootstrap3, the navbar can grow too large and cover contents.)
# WARNING: If you link to directories, make sure to follow
# ``STRIP_INDEXES``. If it’s set to ``True``, end your links
# with a ``/``, otherwise end them with ``/index.html`` — or
# else they won’t be highlighted when active.
NAVIGATION_LINKS = {
DEFAULT_LANG: (
('/stories/about.html', 'About'),
# ('/categories/index.html', 'Tags'),
# ('/rss.xml', 'RSS'),
('https://twitter.com/sefk', '@sefk'),
('https://github.com/sefk', 'GitHub'),
('https://rawgithub.com/sefk/sef-resume/master/sef-kloninger-resume.html', 'Resume'),
('https://rawgithub.com/sefk/sef-resume/master/sef-kloninger-resume.pdf', 'PDF'),
('/stories/tools.html', 'Tools'),
('/archive.html', 'Archives'),
),
}
# Name of the theme to use.
THEME = "bootstrap3"
# Below this point, everything is optional
# Post's dates are considered in UTC by default, if you want to use
# another time zone, please set TIMEZONE to match. Check the available
# list from Wikipedia:
# http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
# (e.g. 'Europe/Zurich')
# Also, if you want to use a different time zone in some of your posts,
# you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00)
TIMEZONE = "America/Los_Angeles"
# If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola
# (especially in new_post), set this to True.
# Note that this does not affect DATE_FORMAT.
# FORCE_ISO8601 = False
# Date format used to display post dates.
# (str used by datetime.datetime.strftime)
# DATE_FORMAT = '%Y-%m-%d %H:%M'
# Date format used to display post dates, if local dates are used.
# (str used by moment.js)
# JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm'
# Date fanciness.
#
# 0 = using DATE_FORMAT and TIMEZONE
# 1 = using JS_DATE_FORMAT and local user time (via moment.js)
# 2 = using a string like “2 days ago”
#
# Your theme must support it, bootstrap and bootstrap3 already do.
DATE_FANCINESS = 2
# While Nikola can select a sensible locale for each language,
# sometimes explicit control can come handy.
# In this file we express locales in the string form that
# python's locales will accept in your OS, by example
# "en_US.utf8" in Unix-like OS, "English_United States" in Windows.
# LOCALES = dict mapping language --> explicit locale for the languages
# in TRANSLATIONS. You can omit one or more keys.
# LOCALE_FALLBACK = locale to use when an explicit locale is unavailable
# LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if
# not set the default Nikola mapping is used.
# POSTS and PAGES contains (wildcard, destination, template) tuples.
#
# The wildcard is used to generate a list of reSt source files
# (whatever/thing.txt).
#
# That fragment could have an associated metadata file (whatever/thing.meta),
# and optionally translated files (example for Spanish, with code "es"):
# whatever/thing.es.txt and whatever/thing.es.meta
#
# This assumes you use the default TRANSLATIONS_PATTERN.
#
# From those files, a set of HTML fragment files will be generated:
# cache/whatever/thing.html (and maybe cache/whatever/thing.html.es)
#
# These files are combined with the template to produce rendered
# pages, which will be placed at
# output / TRANSLATIONS[lang] / destination / pagename.html
#
# where "pagename" is the "slug" specified in the metadata file.
#
# The difference between POSTS and PAGES is that POSTS are added
# to feeds and are considered part of a blog, while PAGES are
# just independent HTML pages.
#
POSTS = (
("posts/*.md", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)
PAGES = (
("stories/*.md", "stories", "story.tmpl"),
("stories/*.html", "stories", "story.tmpl"),
)
# One or more folders containing files to be copied as-is into the output.
# The format is a dictionary of {source: relative destination}.
# Default is:
# FILES_FOLDERS = {'files': ''}
# Which means copy 'files' into 'output'
# One or more folders containing listings to be processed and stored into
# the output. The format is a dictionary of {source: relative destination}.
# Default is:
# LISTINGS_FOLDERS = {'listings': 'listings'}
# Which means process listings from 'listings' into 'output/listings'
# A mapping of languages to file-extensions that represent that language.
# Feel free to add or delete extensions to any list, but don't add any new
# compilers unless you write the interface for it yourself.
#
# 'rest' is reStructuredText
# 'markdown' is MarkDown
# 'html' assumes the file is html and just copies it
COMPILERS = {
"rest": ('.rst', '.txt'),
"markdown": ('.md', '.mdown', '.markdown'),
"textile": ('.textile',),
"txt2tags": ('.t2t',),
"bbcode": ('.bb',),
"wiki": ('.wiki',),
"ipynb": ('.ipynb',),
"html": ('.html', '.htm'),
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ('.php',),
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ('.rst', '.md', '.txt'),
}
# Create by default posts in one file format?
# Set to False for two-file posts, with separate metadata.
# ONE_FILE_POSTS = True
# If this is set to True, the DEFAULT_LANG version will be displayed for
# untranslated posts.
# If this is set to False, then posts that are not translated to a language
# LANG will not be visible at all in the pages in that language.
# Formerly known as HIDE_UNTRANSLATED_POSTS (inverse)
# SHOW_UNTRANSLATED_POSTS = True
# Nikola supports logo display. If you have one, you can put the URL here.
# Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">.
# The URL may be relative to the site root.
# LOGO_URL = ''
# If you want to hide the title of your website (for example, if your logo
# already contains the text), set this to False.
# SHOW_BLOG_TITLE = True
# Writes tag cloud data in form of tag_cloud_data.json.
# Warning: this option will change its default value to False in v8!
WRITE_TAG_CLOUD = False
# Paths for different autogenerated bits. These are combined with the
# translation paths.
# Final locations are:
# output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags)
# output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag)
# output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag)
# TAG_PATH = "categories"
# If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# TAG_PAGES_ARE_INDEXES = False
# Set descriptions for tag pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the tag list or index page’s title.
# TAG_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# If you do not want to display a tag publicly, you can mark it as hidden.
# The tag will not be displayed on the tag list page, the tag cloud and posts.
# Tag pages will still be generated.
HIDDEN_TAGS = ['mathjax']
# Only include tags on the tag list/overview page if there are at least
# TAGLIST_MINIMUM_POSTS number of posts or more with every tag. Every tag
# page is still generated, linked from posts, and included in the sitemap.
# However, more obscure tags can be hidden from the tag index page.
# TAGLIST_MINIMUM_POSTS = 1
# Final locations are:
# output / TRANSLATION[lang] / CATEGORY_PATH / index.html (list of categories)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.html (list of posts for a category)
# output / TRANSLATION[lang] / CATEGORY_PATH / CATEGORY_PREFIX category.xml (RSS feed for a category)
# CATEGORY_PATH = "categories"
# CATEGORY_PREFIX = "cat_"
# If CATEGORY_PAGES_ARE_INDEXES is set to True, each category's page will contain
# the posts themselves. If set to False, it will be just a list of links.
# CATEGORY_PAGES_ARE_INDEXES = False
# If you do not want to display a category publicly, you can mark it as hidden.
# The category will not be displayed on the category list page.
# Category pages will still be generated.
# HIDDEN_CATEGORIES = []
# Set descriptions for category pages to make them more interesting. The
# default is no description. The value is used in the meta description
# and displayed underneath the category list or index page’s title.
# CATEGORY_PAGES_DESCRIPTIONS = {
# DEFAULT_LANG: {
# "blogging": "Meta-blog posts about blogging about blogging.",
# "open source": "My contributions to my many, varied, ever-changing, and eternal libre software projects."
# },
#}
# Final location for the main blog page and sibling paginated pages is
# output / TRANSLATION[lang] / INDEX_PATH / index-*.html
# INDEX_PATH = ""
# Create per-month archives instead of per-year
# CREATE_MONTHLY_ARCHIVE = False
# Create one large archive instead of per-year
CREATE_SINGLE_ARCHIVE = True
# Create year, month, and day archives each with a (long) list of posts
# (overrides both CREATE_MONTHLY_ARCHIVE and CREATE_SINGLE_ARCHIVE)
# CREATE_FULL_ARCHIVES = False
# If monthly archives or full archives are created, adds also one archive per day
# CREATE_DAILY_ARCHIVE = False
# Final locations for the archives are:
# output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html
# output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / DAY / index.html
# ARCHIVE_PATH = ""
# ARCHIVE_FILENAME = "archive.html"
# If ARCHIVES_ARE_INDEXES is set to True, each archive page which contains a list
# of posts will contain the posts themselves. If set to False, it will be just a
# list of links.
# ARCHIVES_ARE_INDEXES = False
# URLs to other posts/pages can take 3 forms:
# rel_path: a relative URL to the current page/post (default)
# full_path: a URL with the full path from the root
# absolute: a complete URL (that includes the SITE_URL)
# URL_TYPE = 'rel_path'
# Final location for the blog main RSS feed is:
# output / TRANSLATION[lang] / RSS_PATH / rss.xml
# RSS_PATH = ""
# Number of posts in RSS feeds
# FEED_LENGTH = 10
# Slug the Tag URL. Easier for users to type; special characters are
# often removed or replaced as well.
# SLUG_TAG_PATH = True
# A list of redirection tuples, [("foo/from.html", "/bar/to.html")].
#
# An HTML file will be created in output/foo/from.html that redirects
# to the "/bar/to.html" URL. Notice that the "from" side MUST be a
# relative URL.
#
# If you don't need any of these, just set to []
REDIRECTIONS = [
(u'2012/03/those-little-utilities/index.html', u'/posts/201203those-little-utilities.html'),
(u'2012/03/too-many-clouds/index.html', u'/posts/201203too-many-clouds.html'),
(u'2012/04/measuring-an-engineering-manager/index.html', u'/posts/201204measuring-an-engineering-manager.html'),
(u'2012/04/my-sabbatical/index.html', u'/posts/201204my-sabbatical.html'),
(u'2012/04/browsers-for-web-apps/index.html', u'/posts/201204browsers-for-web-apps.html'),
(u'2012/05/danny-lewin-42nd-birthday/index.html', u'/posts/201205danny-lewin-42nd-birthday.html'),
(u'2012/05/i-absorb-uncertainty/index.html', u'/posts/201205i-absorb-uncertainty.html'),
(u'2012/05/fizzbuzz-for-managers/index.html', u'/posts/201205fizzbuzz-for-managers.html'),
(u'2012/05/engineering-culture-litmus-tests/index.html', u'/posts/201205engineering-culture-litmus-tests.html'),
(u'2012/05/concentration/index.html', u'/posts/201205concentration.html'),
(u'2012/06/two-things-at-once/index.html', u'/posts/201206two-things-at-once.html'),
(u'2012/07/on-line-education/index.html', u'/posts/201207on-line-education.html'),
(u'2012/07/taking-down-100000-sites/index.html', u'/posts/201207taking-down-100000-sites.html'),
(u'2012/08/wip-folders-with-ls/index.html', u'/posts/201208wip-folders-with-ls.html'),
(u'2012/10/halloween-candy-data/index.html', u'/posts/201210halloween-candy-data.html'),
(u'2013/01/why-so-long/index.html', u'/posts/201301why-so-long.html'),
(u'2013/03/online-ed-retention/index.html', u'/posts/201303online-ed-retention.html'),
(u'2013/04/coding-on-a-flight/index.html', u'/posts/201304coding-on-a-flight.html'),
(u'2013/06/launch-day/index.html', u'/posts/201306launch-day.html'),
(u'2013/09/knr-c-label/index.html', u'/posts/201309knr-c-label.html'),
(u'2013/11/halloween-2013/index.html', u'/posts/201311halloween-2013.html'),
(u'2013/11/seven-things/index.html', u'/posts/201311seven-things.html'),
(u'2013/12/airmail/index.html', u'/posts/201312airmail.html'),
]
# Presets of commands to execute to deploy. Can be anything, for
# example, you may use rsync:
# "rsync -rav --delete output/ joe@my.site:/srv/www/site"
# And then do a backup, or run `nikola ping` from the `ping`
# plugin (`nikola plugin -i ping`). Or run `nikola check -l`.
# You may also want to use github_deploy (see below).
# You can define multiple presets and specify them as arguments
# to `nikola deploy`. If no arguments are specified, a preset
# named `default` will be executed. You can use as many presets
# in a `nikola deploy` command as you like.
# DEPLOY_COMMANDS = {
# 'default': [
# "rsync -rav --delete output/ joe@my.site:/srv/www/site",
# ]
# }
# For user.github.io OR organization.github.io pages, the DEPLOY branch
# MUST be 'master', and 'gh-pages' for other repositories.
# GITHUB_SOURCE_BRANCH = 'master'
# GITHUB_DEPLOY_BRANCH = 'gh-pages'
# The name of the remote where you wish to push to, using github_deploy.
# GITHUB_REMOTE_NAME = 'origin'
# Where the output site should be located
# If you don't use an absolute path, it will be considered as relative
# to the location of conf.py
OUTPUT_FOLDER = 'output'
# where the "cache" of partial generated content should be located
# default: 'cache'
# CACHE_FOLDER = 'cache'
# Filters to apply to the output.
# A dictionary where the keys are either a file extension or
# a tuple of file extensions.
#
# And the value is a list of commands to be applied in order.
#
# Each command must be either:
#
# A string containing a '%s' which will
# be replaced with a filename. The command *must* produce output
# in place.
#
# Or:
#
# A python callable, which will be called with the filename as
# argument.
#
# By default, only .php files use filters to inject PHP into
# Nikola’s templates. All other filters must be enabled through FILTERS.
#
# Many filters are shipped with Nikola. A list is available in the manual:
# <http://getnikola.com/handbook.html#post-processing-filters>
#
# from nikola import filters
# FILTERS = {
# ".html": [filters.typogrify],
# ".js": [filters.closure_compiler],
# ".jpg": ["jpegoptim --strip-all -m75 -v %s"],
# }
# Expert setting! Create a gzipped copy of each generated file. Cheap server-
# side optimization for very high traffic sites or low memory servers.
# GZIP_FILES = False
# File extensions that will be compressed
# GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml')
# Use an external gzip command? None means no.
# Example: GZIP_COMMAND = "pigz -k {filename}"
# GZIP_COMMAND = None
# Make sure the server does not return a "Accept-Ranges: bytes" header for
# files compressed by this option! OR make sure that a ranged request does not
# return partial content of another representation for these resources. Do not
# use this feature if you do not understand what this means.
# Compiler to process LESS files.
# LESS_COMPILER = 'lessc'
# A list of options to pass to the LESS compiler.
# Final command is: LESS_COMPILER LESS_OPTIONS file.less
# LESS_OPTIONS = []
# Compiler to process Sass files.
# SASS_COMPILER = 'sass'
# A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss
# SASS_OPTIONS = []
# #############################################################################
# Image Gallery Options
# #############################################################################
# One or more folders containing galleries. The format is a dictionary of
# {"source": "relative_destination"}, where galleries are looked for in
# "source/" and the results will be located in
# "OUTPUT_PATH/relative_destination/gallery_name"
# Default is:
# GALLERY_FOLDERS = {"galleries": "galleries"}
# More gallery options:
# THUMBNAIL_SIZE = 180
# MAX_IMAGE_SIZE = 1280
# USE_FILENAME_AS_TITLE = True
# EXTRA_IMAGE_EXTENSIONS = []
#
# If set to False, it will sort by filename instead. Defaults to True
# GALLERY_SORT_BY_DATE = True
#
# Folders containing images to be used in normal posts or
# pages. Images will be scaled down according to IMAGE_THUMBNAIL_SIZE
# and MAX_IMAGE_SIZE options, but will have to be referenced manually
# to be visible on the site. The format is a dictionary of {source:
# relative destination}.
#
# IMAGE_FOLDERS = {'images': ''}
# IMAGE_THUMBNAIL_SIZE = 400
# #############################################################################
# HTML fragments and diverse things that are used by the templates
# #############################################################################
# Data about post-per-page indexes.
# INDEXES_PAGES defaults to ' old posts, page %d' or ' page %d' (translated),
# depending on the value of INDEXES_PAGES_MAIN.
#
# (translatable) If the following is empty, defaults to BLOG_TITLE:
# INDEXES_TITLE = ""
#
# (translatable) If the following is empty, defaults to ' [old posts,] page %d' (see above):
# INDEXES_PAGES = ""
#
# If the following is True, INDEXES_PAGES is also displayed on the main (the
# newest) index page (index.html):
# INDEXES_PAGES_MAIN = False
#
# If the following is True, index-1.html has the oldest posts, index-2.html the
# second-oldest posts, etc., and index.html has the newest posts. This ensures
# that all posts on index-x.html will forever stay on that page, no matter how
# many new posts are added.
# If False, index-1.html has the second-newest posts, index-2.html the third-newest,
# and index-n.html the oldest posts. When this is active, old posts can be moved
# to other index pages when new posts are added.
# INDEXES_STATIC = True
#
# (translatable) If PRETTY_URLS is set to True, this setting will be used to create
# more pretty URLs for index pages, such as page/2/index.html instead of index-2.html.
# Valid values for this setting are:
# * False,
# * a list or tuple, specifying the path to be generated,
# * a dictionary mapping languages to lists or tuples.
# Every list or tuple must consist of strings which are used to combine the path;
# for example:
# ['page', '{number}', '{index_file}']
# The replacements
# {number} --> (logical) page number;
# {old_number} --> the page number inserted into index-n.html before (zero for
# the main page);
# {index_file} --> value of option INDEX_FILE
# are made.
# Note that in case INDEXES_PAGES_MAIN is set to True, a redirection will be created
# for the full URL with the page number of the main page to the normal (shorter) main
# page URL.
# INDEXES_PRETTY_PAGE_URL = False
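# For illustration, a hypothetical value following the list form described
# above (page/2/index.html style):
# INDEXES_PRETTY_PAGE_URL = ['page', '{number}', '{index_file}']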
# Color scheme to be used for code blocks. If your theme provides
# "assets/css/code.css" this is ignored.
# Can be any of autumn borland bw colorful default emacs friendly fruity manni
# monokai murphy native pastie perldoc rrt tango trac vim vs
# CODE_COLOR_SCHEME = 'default'
# If you use 'site-reveal' theme you can select several subthemes
# THEME_REVEAL_CONFIG_SUBTHEME = 'sky'
# You can also use: beige/serif/simple/night/default
# Again, if you use 'site-reveal' theme you can select several transitions
# between the slides
# THEME_REVEAL_CONFIG_TRANSITION = 'cube'
# You can also use: page/concave/linear/none/default
# FAVICONS contains (name, file, size) tuples.
# Used to create favicon links like this:
# <link rel="name" href="file" sizes="size"/>
# FAVICONS = {
# ("icon", "/favicon.ico", "16x16"),
# ("icon", "/icon_128x128.png", "128x128"),
# }
# Show only teasers in the index pages? Defaults to False.
# INDEX_TEASERS = False
# HTML fragments with the Read more... links.
# The following tags exist and are replaced for you:
# {link} A link to the full post page.
# {read_more} The string “Read more” in the current language.
# {reading_time} An estimate of how long it will take to read the post.
# {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser.
# {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language.
# {paragraph_count} The number of paragraphs in the post.
# {remaining_paragraph_count} The number of paragraphs in the post, sans the teaser.
# {{ A literal { (U+007B LEFT CURLY BRACKET)
# }} A literal } (U+007D RIGHT CURLY BRACKET)
# 'Read more...' for the index page, if INDEX_TEASERS is True (translatable)
INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>'
# 'Read more...' for the RSS_FEED, if RSS_TEASERS is True (translatable)
RSS_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>'
# Append a URL query to the RSS_READ_MORE_LINK and the //rss/item/link in
# RSS feeds. Minimum example for Piwik "pk_campaign=rss" and Google Analytics
# "utm_source=rss&utm_medium=rss&utm_campaign=rss". Advanced option used for
# traffic source tracking.
RSS_LINKS_APPEND_QUERY = False
# An HTML fragment describing the license, for the sidebar.
# (translatable)
LICENSE = ""
# I recommend using the Creative Commons' wizard:
# http://creativecommons.org/choose/
LICENSE = """
<a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/3.0/us/">
<img alt="Creative Commons License BY-NC-SA"
style="border-width:0; margin-bottom:12px;"
src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>"""
# A small copyright notice for the page footer (in HTML).
# Default is ''
CONTENT_FOOTER = '<p align=center>Contents © {date} <a href="mailto:{email}">{author}</a> ' + \
' ' + \
'{license}' + \
' ' + \
'Powered by <a href="http://getnikola.com" rel="nofollow">Nikola</a> ' + \
'and <a href="http://github.com" rel="nofollow">GitHub</a> ' + \
'</p>'
CONTENT_FOOTER = CONTENT_FOOTER.format(email=BLOG_EMAIL,
author=BLOG_AUTHOR,
date=time.gmtime().tm_year,
license=LICENSE)
# Things that will be passed to CONTENT_FOOTER.format(). This is done
# for translatability, as dicts are not formattable. Nikola will
# intelligently format the setting properly.
# The setting takes a dict. The keys are languages. The values are
# tuples of tuples of positional arguments and dicts of keyword arguments
# to format(). For example, {'en': (('Hello',), {'target': 'World'})}
# results in CONTENT_FOOTER['en'].format('Hello', target='World').
# WARNING: If you do not use multiple languages with CONTENT_FOOTER, this
# still needs to be a dict of this format. (it can be empty if you
# do not need formatting)
# (translatable)
CONTENT_FOOTER_FORMATS = {
DEFAULT_LANG: (
(),
{
"email": BLOG_EMAIL,
"author": BLOG_AUTHOR,
"date": time.gmtime().tm_year,
"license": LICENSE
}
)
}
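# A hypothetical second-language entry (assuming an 'es' translation exists)
# would reuse the same keyword arguments under its own key, e.g.:
# CONTENT_FOOTER_FORMATS['es'] = ((), {"email": BLOG_EMAIL, "author": BLOG_AUTHOR,
#                                      "date": time.gmtime().tm_year, "license": LICENSE})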
# To use comments, you can choose between different third party comment
# systems. The following comment systems are supported by Nikola:
# disqus, facebook, googleplus, intensedebate, isso, livefyre, muut
# You can leave this option blank to disable comments.
COMMENT_SYSTEM = "disqus"
# And you also need to add your COMMENT_SYSTEM_ID which
# depends on what comment system you use. The default is
# "nikolademo" which is a test account for Disqus. More information
# is in the manual.
COMMENT_SYSTEM_ID = "sefkloninger"
# Enable annotations using annotateit.org?
# If set to False, you can still enable them for individual posts and pages
# setting the "annotations" metadata.
# If set to True, you can disable them for individual posts and pages using
# the "noannotations" metadata.
# ANNOTATIONS = False
# Create index.html for story folders?
# STORY_INDEX = False
# Enable comments on story pages?
# COMMENTS_IN_STORIES = False
# Enable comments on picture gallery pages?
# COMMENTS_IN_GALLERIES = False
# What file should be used for directory indexes?
# Defaults to index.html
# Common other alternatives: default.html for IIS, index.php
# INDEX_FILE = "index.html"
# If a link ends in /index.html, drop the index.html part.
# http://mysite/foo/bar/index.html => http://mysite/foo/bar/
# (Uses the INDEX_FILE setting, so if that is, say, default.html,
# it will instead strip /foo/default.html => /foo)
# (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4)
# Default = False
# STRIP_INDEXES = False
# Should the sitemap list directories which only include other directories
# and no files?
# Defaults to True
# If this is False:
# e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap
# if /2012 includes any files (including index.html): add it to the sitemap
# SITEMAP_INCLUDE_FILELESS_DIRS = True
# List of files relative to the server root (!) that will be asked to be excluded
# from indexing and other robotic spidering. * is supported. Will only be effective
# if SITE_URL points to server root. The list is used to exclude resources from
# /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml.
# ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"]
# Instead of putting files in <slug>.html, put them in
# <slug>/index.html. Also enables STRIP_INDEXES
# This can be disabled on a per-page/post basis by adding
# .. pretty_url: False
# to the metadata
# PRETTY_URLS = False
# If True, publish future dated posts right away instead of scheduling them.
# Defaults to False.
# FUTURE_IS_NOW = False
# If True, future dated posts are allowed in deployed output
# Only the individual posts are published/deployed; not in indexes/sitemap
# Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value.
# DEPLOY_FUTURE = False
# If False, draft posts will not be deployed
# DEPLOY_DRAFTS = True
# Allows scheduling of posts using the rule specified here (new_post -s)
# Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html
# SCHEDULE_RULE = ''
# If True, apply the scheduling rule to all posts by default
# SCHEDULE_ALL = False
# Do you want to add a MathJax config file?
# MATHJAX_CONFIG = ""
# If you are using the compile-ipynb plugin, just add this one:
# MATHJAX_CONFIG = """
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {
# inlineMath: [ ['$','$'], ["\\\(","\\\)"] ],
# displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ]
# },
# displayAlign: 'left', // Change this to 'center' to center equations.
# "HTML-CSS": {
# styles: {'.MathJax_Display': {"margin": 0}}
# }
# });
# </script>
# """
# Do you want to customize the nbconversion of your IPython notebook?
# IPYNB_CONFIG = {}
# With the following example configuration you can use a custom jinja template
# called `toggle.tpl` which has to be located in your site/blog main folder:
# IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}}
# What Markdown extensions to enable?
# You will also get gist, nikola and podcast because those are
# done in the code, hope you don't mind ;-)
# Note: most Nikola-specific extensions are done via the Nikola plugin system,
# with the MarkdownExtension class and should not be added here.
# MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite']
# Social buttons. This is sample code for AddThis (which was the default for a
# long time). Insert anything you want here, or even make it empty.
# (translatable)
SOCIAL_BUTTONS_CODE = ""
# SOCIAL_BUTTONS_CODE = """
# <!-- Social buttons -->
# <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style">
# <a class="addthis_button_more">Share</a>
# <ul><li><a class="addthis_button_facebook"></a>
# <li><a class="addthis_button_google_plusone_share"></a>
# <li><a class="addthis_button_linkedin"></a>
# <li><a class="addthis_button_twitter"></a>
# </ul>
# </div>
# <script src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script>
# <!-- End of social buttons -->
# """
# Show link to source for the posts?
# Formerly known as HIDE_SOURCELINK (inverse)
SHOW_SOURCELINK = True
# Copy the source files for your pages?
# Setting it to False implies SHOW_SOURCELINK = False
COPY_SOURCES = True
# Modify the number of posts per index page
# Defaults to 10
# Sef: I write longer pieces, 10 is too many.
INDEX_DISPLAY_POST_COUNT = 8
# By default, Nikola generates RSS files for the website and for tags, and
# links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True
# RSS_LINK is an HTML fragment to link the RSS or Atom feeds. If set to None,
# the base.tmpl will use the feed Nikola generates. However, you may want to
# change it for a feedburner feed or something else.
# RSS_LINK = None
# Show only teasers in the RSS feed? Defaults to True
# RSS_TEASERS = True
# Strip HTML in the RSS feed? Defaults to False
# RSS_PLAIN = False
# A search form to search this site, for the sidebar. You can use a google
# custom search (http://www.google.com/cse/)
# Or a duckduckgo search: https://duckduckgo.com/search_box.html
# Default is no search form.
# (translatable)
# SEARCH_FORM = ""
#
# This search form works for any site and looks good in the "site" theme where
# it appears on the navigation bar:
#
# SEARCH_FORM = """
# <!-- Custom search -->
# <form method="get" id="search" action="//duckduckgo.com/"
# class="navbar-form pull-left">
# <input type="hidden" name="sites" value="%s"/>
# <input type="hidden" name="k8" value="#444444"/>
# <input type="hidden" name="k9" value="#D51920"/>
# <input type="hidden" name="kt" value="h"/>
# <input type="text" name="q" maxlength="255"
# placeholder="Search…" class="span2" style="margin-top: 4px;"/>
# <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" />
# </form>
# <!-- End of custom search -->
# """ % SITE_URL
#
# If you prefer a google search form, here's an example that should just work:
SEARCH_FORM = """
<!-- Custom search with google-->
<form id="search" action="http://google.com/search" method="get" class="navbar-form pull-left">
<input type="hidden" name="q" value="site:%s" />
<input type="text" name="q" size="60" maxlength="255" results="0" placeholder="Search on Google"/>
</form>
<!-- End of custom search -->
""" % SITE_URL
# Also, there is a local search plugin you can use, based on Tipue, but it requires setting several
# options:
# SEARCH_FORM = """
# <span class="navbar-form pull-left">
# <input type="text" id="tipue_search_input">
# </span>
# """
#BODY_END = """
#<script type="text/javascript" src="/assets/js/tipuesearch_set.js"></script>
#<script type="text/javascript" src="/assets/js/tipuesearch.js"></script>
#<script type="text/javascript">
#$(document).ready(function() {
# $('#tipue_search_input').tipuesearch({
# 'mode': 'json',
# 'contentLocation': '/assets/js/tipuesearch_content.json',
# 'showUrl': false
# });
#});
#</script>
#"""
# Google Analytics on every page
BODY_END = """
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-30366531-1', 'kloninger.com');
ga('send', 'pageview');
</script>
<script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
"""
#EXTRA_HEAD_DATA = """
#<link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css">
#<div id="tipue_search_content" style="margin-left: auto; margin-right: auto; padding: 20px;"></div>
#"""
#ENABLED_EXTRAS = ['local_search']
# Use content distribution networks for jquery and twitter-bootstrap css and js
# If this is True, jquery is served from the Google CDN and twitter-bootstrap
# is served from the NetDNA CDN
# Set this to False if you want to host your site without requiring access to
# external resources.
# USE_CDN = False
# Extra things you want in the pages HEAD tag. This will be added right
# before </head>
# (translatable)
# EXTRA_HEAD_DATA = ""
# Google Analytics or whatever else you use. Added to the bottom of <body>
# in the default template (base.tmpl).
# (translatable)
# BODY_END = ""
# The possibility to extract metadata from the filename by using a
# regular expression.
# To make it work you need to name parts of your regular expression.
# The following names will be used to extract metadata:
# - title
# - slug
# - date
# - tags
# - link
# - description
#
# An example re is the following:
# '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
# FILE_METADATA_REGEXP = None
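# With the example regexp above, a hypothetical file named
# "2015-07-09-hello-world-Hello World.md" would yield
# date=2015-07-09, slug=hello-world and title=Hello World.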
# If you hate "Filenames with Capital Letters and Spaces.md", you should
# set this to True.
UNSLUGIFY_TITLES = True
# Additional metadata that is added to a post when creating a new_post
# ADDITIONAL_METADATA = {}
# Nikola supports Open Graph Protocol data for enhancing link sharing and
# discoverability of your site on Facebook, Google+, and other services.
# Open Graph is enabled by default.
# USE_OPEN_GRAPH = True
# Nikola supports Twitter Card summaries
# Twitter cards are disabled by default. They make it possible for you to
# attach media to Tweets that link to your content.
#
# IMPORTANT:
# Please note, that you need to opt-in for using Twitter Cards!
# To do this please visit
# https://dev.twitter.com/form/participate-twitter-cards
#
# Uncomment and modify the following lines to match your accounts.
# Specifying the id for either 'site' or 'creator' will be preferred
# over the cleartext username. Specifying an ID is not necessary.
# Displaying images is currently not supported.
# TWITTER_CARD = {
# # 'use_twitter_cards': True, # enable Twitter Cards
# # 'site': '@website', # twitter nick for the website
# # 'site:id': 123456, # Same as site, but the website's Twitter user ID
# # instead.
# # 'creator': '@username', # Username for the content creator / author.
# # 'creator:id': 654321, # Same as creator, but the Twitter user's ID.
# }
# If webassets is installed, bundle JS and CSS to make site loading faster
USE_BUNDLES = True
# Plugins you don't want to use. Be careful :-)
# DISABLED_PLUGINS = ["render_galleries"]
# Add the absolute paths to directories containing plugins to use them.
# For example, the `plugins` directory of your clone of the Nikola plugins
# repository.
# EXTRA_PLUGINS_DIRS = []
# List of regular expressions, links matching them will always be considered
# valid by "nikola check -l"
# LINK_CHECK_WHITELIST = []
# If set to True, enable optional hyphenation in your posts (requires pyphen)
# HYPHENATE = False
# The <hN> tags in HTML generated by certain compilers (reST/Markdown)
# will be demoted by that much (1 → h1 will become h2 and so on)
# This was a hidden feature of the Markdown and reST compilers in the
# past. Useful especially if your post titles are in <h1> tags too, for
# example.
# (defaults to 1.)
# DEMOTE_HEADERS = 1
# You can configure the logging handlers installed as plugins or change the
# log level of the default stderr handler.
# WARNING: The stderr handler allows only the loglevels of 'INFO' and 'DEBUG'.
# This is done for safety reasons, as blocking out anything other
# than 'DEBUG' may hide important information and break the user
# experience!
LOGGING_HANDLERS = {
'stderr': {'loglevel': 'INFO', 'bubble': True},
# 'smtp': {
# 'from_addr': 'test-errors@example.com',
# 'recipients': ('test@example.com',),  # a tuple needs the trailing comma
# 'credentials':('testusername', 'password'),
# 'server_addr': ('127.0.0.1', 25),
# 'secure': (),
# 'level': 'DEBUG',
# 'bubble': True
# }
}
# Templates will use those filters, along with the defaults.
# Consult your engine's documentation on filters if you need help defining
# those.
# TEMPLATE_FILTERS = {}
# Put in global_context things you want available on all your templates.
# It can be anything, data, functions, modules, etc.
GLOBAL_CONTEXT = {}
|
sefk/sef-site
|
conf.py
|
Python
|
apache-2.0
| 40,645
|
[
"VisIt"
] |
75584ca32436d53c46d5011f060871e723a820f358be07aef8e3f2bb818b726c
|
"""
Project
FPGA-Imaging-Library
Design
RankFilter
Function
Local filter - Rank filter; it is often used for denoising while preserving edges.
Module
Software simulation.
Version
1.0
Modified
2015-05-21
Copyright (C) 2015 Tianyu Dai (dtysky) <dtysky@outlook.com>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Homepage for this project:
http://fil.dtysky.moe
Sources for this project:
https://github.com/dtysky/FPGA-Imaging-Library
My e-mail:
dtysky@outlook.com
My blog:
http://dtysky.moe
"""
__author__ = 'Dai Tianyu (dtysky)'
def rank_filter(window, rank):
win = []
for row in window:
win += row
return sorted(win)[rank]
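# A minimal usage sketch (not part of the original module): the function
# flattens the 2-D window and indexes the sorted values, so for an N-element
# window, rank N//2 gives the classic median filter. The 3x3 window below is
# a made-up example input.
if __name__ == '__main__':
    window = [[9, 2, 7],
              [4, 5, 6],
              [1, 8, 3]]
    # 9 values, rank 4 is the median -> prints 5
    print(rank_filter(window, 4))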
|
hj3938/FPGA-Imaging-Library
|
LocalFilter/ThresholdLocal/SoftwareSim/RankFilter.py
|
Python
|
lgpl-2.1
| 1,298
|
[
"MOE"
] |
9ea11f98b1152b6bd4fa0140c89427f5d5c48609b6620e01b1fc818e7fa0c384
|
## Copyright (c) 2015 Ryan Koesterer GNU General Public License v3
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import numpy as np
from uga import Geno
from uga import Parse
from uga import Variant
import pysam
from Bio import bgzf
from uga import Process
import multiprocessing as mp
import sys
import os
import logging
import pickle
logging.basicConfig(format='%(asctime)s - %(processName)s - %(name)s - %(message)s',level=logging.DEBUG)
logger = logging.getLogger("RunMerge")
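# process_regions (below) handles one CPU's share of the work: it optionally
# redirects stdout/stderr to a per-CPU log, loads each results file, aligns
# and tags variants region by region, outer-merges them into a single
# DataFrame, and pickles the result for the parent process to concatenate.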
def process_regions(regions_df, cfg, cpu, log):
regions_df = regions_df[regions_df['cpu'] == cpu].reset_index(drop=True)
if log:
try:
log_file = open(cfg['out'] + '.cpu' + str(cpu) + '.log','w')
except:
print(Process.Error("unable to initialize log file " + cfg['out'] + '.cpu' + str(cpu) + '.log').out)
return 1
else:
stdout_orig = sys.stdout
stderr_orig = sys.stderr
sys.stdout = log_file
sys.stderr = log_file
results_obj = {}
for f in cfg['file_order']:
print("\nloading results file " + f)
try:
results_obj[f] = Geno.Results(filename=cfg['files'][f], chr=cfg['columns'][f]['chr'], pos=cfg['columns'][f]['pos'], id=cfg['columns'][f]['id'], a1=cfg['columns'][f]['a1'], a2=cfg['columns'][f]['a2'])
except Process.Error as err:
print(err.out)
return 1
variants_found = False
variant_ref = Variant.Ref()
results_final = None
for k in range(len(regions_df.index)):
region_written = False
print('')
print('loading region ' + str(k+1) + '/' + str(len(regions_df.index)) + ' (' + regions_df['region'][k] + ') ...')
for f in cfg['file_order']:
try:
results_obj[f].get_region(regions_df['region'][k])
except:
pass
try:
results_obj[f].get_snvs(cfg['buffer'])
except:
pass
variants_found = True
if f == cfg['file_order'][0]:
variant_ref.load(results_obj[f].snv_results)
else:
variant_ref.update(results_obj[f].snv_results)
results_obj[f].align_results(variant_ref)
results_obj[f].tag_results(f)
if not region_written:
results_region = results_obj[f].snv_results_tagged.copy()
region_written = True
else:
results_region_cols = [x for x in results_region.columns.values] + [x for x in results_obj[f].snv_results_tagged.columns.values if x not in results_region.columns.values]
if results_region.empty and not results_obj[f].snv_results_tagged.empty:
results_region=pd.concat([results_obj[f].snv_results_tagged[['chr','pos','id','a1','a2','id_unique','___uid___']].iloc[[0]],pd.DataFrame(dict(list(zip([x for x in results_region.columns.values if x not in ['chr','pos','id','a1','a2','id_unique','___uid___']],[np.nan for x in results_region.columns.values if x not in ['chr','pos','id','a1','a2','id_unique','___uid___']]))),index=[0])],axis=1)
results_region = results_region.merge(results_obj[f].snv_results_tagged, how='outer')
results_region = results_region[results_region_cols]
status = ' (' + f + ') processed ' + str(results_obj[f].snv_results.shape[0]) + ' variants'
print(status)
sys.stdout.flush()
if k == 0:
results_final = results_region.copy()
else:
results_final = results_final.merge(results_region, how='outer')
results_final = results_final[[a for a in results_final.columns if a not in ['id_unique','___uid___']]]
out_dtypes = results_final.dtypes.apply(lambda x: x.name).to_dict()
for col in [x for x in results_final.columns if x in out_dtypes and out_dtypes[x] != 'object']:
results_final[col] = results_final[col].astype(out_dtypes[col])
for col in [x for x in results_final.columns if x in out_dtypes and out_dtypes[x] == 'object']:
results_final[col] = results_final[col].str.decode("utf-8")
results_final = results_final.sort_values(by=['chr','pos'])
results_final['chr'] = results_final['chr'].astype(np.int64)
results_final['pos'] = results_final['pos'].astype(np.int64)
pkl = open('/'.join(cfg['out'].split('/')[0:-1]) + '/' + cfg['out'].split('/')[-1] + '.cpu' + str(cpu) + '.pkl', "wb")
pickle.dump([results_final,np.array(results_final.columns.values)],pkl,protocol=2)
pkl.close()
if log:
sys.stdout = stdout_orig
log_file.close()
if variants_found:
return 0
else:
return -1
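# RunMerge (below) is the driver: it reads the region file for this job, fans
# the regions out over the configured CPUs (a multiprocessing pool plus the
# main process), stitches the per-CPU pickles into one bgzipped,
# tabix-indexed output file, and optionally runs SnpEff/SnpSift annotation.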
def RunMerge(args):
cfg = Parse.generate_merge_cfg(args)
Parse.print_merge_options(cfg)
if not cfg['debug']:
logging.disable(logging.CRITICAL)
regions_df = pd.read_table(cfg['region_file'], compression='gzip' if cfg['region_file'].split('.')[-1] == 'gz' else None)
regions_df = regions_df[regions_df['job'] == int(cfg['job'])].reset_index(drop=True)
return_values = {}
print('')
try:
bgzfile = bgzf.BgzfWriter(cfg['out'] + '.gz', 'wb')
except:
print(Process.Error("failed to initialize bgzip format out file " + cfg['out'] + '.gz').out)
return 1
if cfg['cpus'] > 1:
pool = mp.Pool(cfg['cpus']-1)
for i in range(1,cfg['cpus']):
return_values[i] = pool.apply_async(process_regions, args=(regions_df,cfg,i,True,))
print("submitting job on cpu " + str(i) + " of " + str(cfg['cpus']))
pool.close()
print("executing job for cpu " + str(cfg['cpus']) + " of " + str(cfg['cpus']) + " via main process")
main_return = process_regions(regions_df,cfg,cfg['cpus'],True)
pool.join()
if 1 in [return_values[i].get() for i in return_values] or main_return == 1:
print(Process.Error("error detected, see log files").out)
return 1
else:
main_return = process_regions(regions_df,cfg,1,True)
if main_return == 1:
print(Process.Error("error detected, see log files").out)
return 1
for i in range(1,cfg['cpus']+1):
try:
logfile = open(cfg['out'] + '.cpu' + str(i) + '.log', 'r')
except:
print(Process.Error("failed to initialize log file " + cfg['out'] + '.cpu' + str(i) + '.log').out)
return 1
print(logfile.read())
logfile.close()
os.remove(cfg['out'] + '.cpu' + str(i) + '.log')
written = False
for i in range(1,cfg['cpus']+1):
out = '/'.join(cfg['out'].split('/')[0:-1]) + '/' + cfg['out'].split('/')[-1] + '.cpu' + str(i) + '.pkl'
pkl = open(out,"rb")
results_final,results_header = pickle.load(pkl)
if not written:
bgzfile.write('#' + '\t'.join(results_header) + '\n')
written = True
if results_final.shape[0] > 0:
bgzfile.write(results_final.replace({'None': 'NA', 'nan': 'NA'}).to_csv(index=False, sep='\t', header=False, na_rep='NA', float_format='%.5g', columns = results_header))
pkl.close()
os.remove(out)
bgzfile.close()
print("indexing out file")
try:
pysam.tabix_index(cfg['out'] + '.gz',seq_col=0,start_col=1,end_col=1,force=True)
except:
print(Process.Error('failed to generate index for file ' + cfg['out'] + '.gz').out)
return 1
if cfg['snpeff']:
from configparser import SafeConfigParser
from pkg_resources import resource_filename
import subprocess
import xlsxwriter
import time
ini = SafeConfigParser()
ini.read(resource_filename('uga', 'settings.ini'))
results_final = pd.read_table(cfg['out'] + '.gz')
outdf = results_final[['#chr','pos','id','a1','a2']]
outdf = outdf.rename(columns={'#chr':'#CHROM','pos':'POS','id':'ID','a1':'REF','a2':'ALT'})
outdf['QUAL'] = None
outdf['FILTER'] = None
outdf['INFO'] = None
outdf.to_csv(cfg['out'] + '.annot1',header=True, index=False, sep='\t')
time.sleep(1)
try:
cmd = 'java -jar ' + ini.get('main','snpeff') + ' -s ' + cfg['out'] + '.annot.summary.html -v -canon GRCh37.75 ' + cfg['out'] + '.annot1 > ' + cfg['out'] + '.annot2'
print(cmd)
p = subprocess.Popen(cmd,shell=True)
p.wait()
except KeyboardInterrupt:
kill_all(p.pid)
print("canonical annotation process terminated by user")
sys.exit(1)
return
time.sleep(1)
try:
cmd = 'java -jar ' + ini.get('main','snpsift') + ' extractFields -s "," -e "NA" ' + cfg['out'] + '.annot2 CHROM POS ID REF ALT "ANN[*].ALLELE" "ANN[*].EFFECT" "ANN[*].IMPACT" "ANN[*].GENE" "ANN[*].GENEID" "ANN[*].FEATURE" "ANN[*].FEATUREID" "ANN[*].BIOTYPE" "ANN[*].RANK" "ANN[*].HGVS_C" "ANN[*].HGVS_P" "ANN[*].CDNA_POS" "ANN[*].CDNA_LEN" "ANN[*].CDNA_LEN" "ANN[*].CDS_POS" "ANN[*].CDS_LEN" "ANN[*].AA_POS" "ANN[*].AA_LEN" "ANN[*].DISTANCE" "ANN[*].ERRORS" | sed "s/ANN\[\*\]/ANN/g" > ' + cfg['out'] + '.annot'
print(cmd)
p = subprocess.Popen(cmd,shell=True)
p.wait()
except KeyboardInterrupt:
kill_all(p.pid)
print("SnpSift annotation process terminated by user")
sys.exit(1)
os.remove(cfg['out'] + '.annot1')
os.remove(cfg['out'] + '.annot2')
results_final = results_final.rename(columns={'#chr':'#CHROM','pos':'POS','id':'ID','a1':'REF','a2':'ALT'})
annot = pd.read_table(cfg['out'] + '.annot')
out = results_final.merge(annot,how='outer')
out.fillna('NA',inplace=True)
wkbk = xlsxwriter.Workbook(cfg['out'] + '.annot.xlsx')
wksht = wkbk.add_worksheet()
header_format = wkbk.add_format({'bold': True,
'align': 'center',
'valign': 'vcenter'})
		string_format = wkbk.add_format({'align': 'center', 'valign': 'vcenter'})
		float_format = wkbk.add_format({'align': 'center', 'valign': 'vcenter'})
		float_format.set_num_format('0.000')
		integer_format = wkbk.add_format({'align': 'center', 'valign': 'vcenter'})
		integer_format.set_num_format('0')
		sci_format = wkbk.add_format({'align': 'center', 'valign': 'vcenter'})
		sci_format.set_num_format('0.00E+00')
i = 0
for field in out.columns:
wksht.write(0,i,field,header_format)
i += 1
i = 0
for row in range(out.shape[0]):
j = 0
for field in out.columns:
if field in ['#CHROM','POS'] or field.endswith('.filtered') or field.endswith('.n'):
wksht.write(row+1,j,out[field][i], integer_format)
elif field.endswith(('.p','hwe','hwe.unrel')):
wksht.write(row+1,j,out[field][i], sci_format)
elif field.endswith(('.effect','.stderr','.or','.z','freq','freq.unrel','rsq','rsq.unrel','callrate')):
wksht.write(row+1,j,out[field][i], float_format)
else:
wksht.write(row+1,j,out[field][i], string_format)
j += 1
i += 1
wksht.freeze_panes(1, 0)
wkbk.close()
os.remove(cfg['out'] + '.annot')
print("process complete")
return 0
|
rmkoesterer/uga
|
uga/RunMerge.py
|
Python
|
gpl-3.0
| 10,695
|
[
"pysam"
] |
c767c7f372338a5f9102719db891e3db93b2a96a09e8dfe76566e69c5c883ca4
|
#---------written by Felix Oesterle (FSO)-----------------
#-DESCRIPTION:
# This is based on fabfile from Raincloud Project (simplified)
#
#-Last modified: Thu Jul 09, 2015 13:10
#@author Felix Oesterle
#-----------------------------------------------------------
from __future__ import with_statement, print_function
from fabric.api import *
import boto.ec2
from boto.vpc import VPCConnection
from boto.ec2.blockdevicemapping import EBSBlockDeviceType, BlockDeviceMapping
import os
import time
import sys
import socket
import datetime
import math
from collections import defaultdict
#-----------------------------------------------------------
# SHORT DOCU
#-----------------------------------------------------------
# -------- SETUP BOTO and Fabric-----------------
# Virtualenv/Generic pip:
# pip install boto fabric
#
# Conda:
# conda install boto fabric
#
# Debian/Ubuntu:
# apt-get install fabric python-boto
#
# install all other missing modules from the list above (start e.g. ipython and copy all imports above and see what's missing;
# all modules should be available via pip or easy_install)
#
# Create credentials file: ~/.boto and fill it with the info given by admin (most likely Ben)
# (replace XXXX with what you want to use in fabfile)
#
# [profile XXXX]
# aws_access_key_id = YOUR Access Key ID HERE
# aws_secret_access_key = YOUR Secret Access Key HERE
#
# If you don't want to be prompted to accept ssh keys with every new instance, place these lines into the ~/.ssh/config file:
#
# Host *amazonaws.com
# User root
# StrictHostKeyChecking no
# UserKnownHostsFile /dev/null
#
#
# ------------RUNNING-------------
# look at fabfile.py
#
# to list all possible task of fabfile:
# fab -l
#
# A few first steps:
# 1. Go through setup below and adjust at least: ec2Profile, def_logfile
# 2. Create instance with
# fab cloud_make
#    If you are using spot instances and require your instances to be in a specific
#    availability zone, use
#     fab instance_start
#    instead. This will use the zone configured in def_default_avz.
# 3. Takes between 5 - 10 minutes (if still using spot as def_default_requesttype)
# 4. Use
# fab install_node_software
#    to set up a virtualenv ready for OGGM.
#
#    If you already set up a virtualenv on your user volume,
# fab install_node_apt
# to install only required system components.
# 5. Use
# fab connect
# to ssh into instance
# 6. play around with the instance, install software etc
# 7. look at current costs with
# fab calc_approx_costs_running
# or list all instances with
# fab cloud_list
# 8. Once you have had enough, shut down your instance via
# fab terminate_one
# Or terminate all running instances if you are sure they all belong to you
# fab cloud_terminate
# you can also delete volumes with:
# fab terminate_perm_user_vol:name='your_volume_name'
#-----------------------------------------------------------
# SETUP
#-----------------------------------------------------------
env.disable_known_hosts=True
env.user = 'ubuntu'
# FSO--- default name used in tags and instance names:
# set this eg. to your name
def_cn = 'AWS'
# Change to a string identifying yourself
user_identifier = None
# FSO--- ssh and credentials setup
# FSO---the name of the amazon keypair (will be created if it does not exist)
keyn=(user_identifier or 'None') + '_oggm'
# FSO--- the same name as you used in boto setup XXXX (see Readme)
ec2Profile = 'OGGM'
def_key_dir=os.path.expanduser('~/.ssh')
# FSO--- Amazon AWS region setup
def_regions = ['us-east-1','eu-west-1'] #regions for spot search
def_default_avz = 'eu-west-1a' #Default availability zone if ondemand is used
# FSO--- type of instance pricing, either:
# ondemand: faster availability, more expensive
# spot: cheaper, takes longer to start up, might be shutdown without warning
def_default_requesttype = 'spot'
# def_default_requesttype = 'ondemand'
# FSO--- the AMI to use
def_ami = dict()
def_ami['eu-west-1'] = 'ami-c32610a5' #eu Ubuntu 16.04 LTS oggm-base
def_ami['us-east-1'] = 'ami-9f3e9689' #us Ubuntu 16.04 LTS oggm-base
# Subnet to use per AVZ, expects a tuple (vpc-id, subnet-id)
def_subnet = dict()
def_subnet['eu-west-1a'] = ('vpc-61f04204', 'subnet-306ff847')
def_subnet['eu-west-1b'] = ('vpc-61f04204', 'subnet-6ad17933')
def_subnet['us-west-1c'] = ('vpc-61f04204', 'subnet-2e2f414b')
# Size of the rootfs of created instances
rootfs_size_gb = 50
# Name and size of the persistent /work file system
home_volume_ebs_name = "ebs_" + (user_identifier or 'None') # Set to None to disable home volume
new_homefs_size_gb = 50 # GiB, only applies to newly created volumes
# FSO---log file with timestamps to analyse cloud performance
# look at it with tail -f cloudexecution.log
def_logfile = os.path.expanduser('~/cloudexecution.log')
# Default instance type, index into instance_infos array below
def_inst_type = 1
#-----------------------------------------------------------
# SETUP END
#-----------------------------------------------------------
fabfile_dir = os.path.dirname(os.path.abspath(__file__))
if user_identifier is None:
raise RuntimeError('user identifier must be set')
instance_infos = [
{
'type': 't2.micro',
'vcpus': 1,
'price': 0.014,
},
{
'type': 'm4.xlarge',
'vcpus': 4,
'price': 0.264,
},
{
'type': 'c4.2xlarge',
'vcpus': 8,
'price': 0.477,
},
{
'type': 'c4.8xlarge',
'vcpus': 36,
'price': 1.906,
},
]
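# For example, setting def_inst_type = 2 above would select the c4.2xlarge entry.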
def_price = instance_infos[def_inst_type]['price']
def update_key_filename(region):
key_name = get_keypair_name(region)
key_dir = os.path.expanduser(def_key_dir)
key_dir = os.path.expandvars(key_dir)
env.key_filename = os.path.join(key_dir, key_name + '.pem')
print('Current key filename: %s' % env.key_filename)
def find_inst_info(inst_type):
for info in instance_infos:
if info['type'] == inst_type:
return info
return None
@task
def cloud_make(cn=def_cn):
"""
Start and prepare instance -THIS IS THE MAIN ACTIVITY-
"""
# t = time.time()
log_with_ts("fabric started ------------------------------")
log_with_ts("Instance: " + instance_infos[def_inst_type]['type'] + "(" + str(instance_infos[def_inst_type]['vcpus']) + " CPUs)")
# FSO---set best avz
if def_default_requesttype == 'spot':
best_avz,request_type = get_cheapest_availability_zone(def_price)
else:
best_avz = def_default_avz
request_type = 'ondemand'
print(best_avz, request_type)
log_with_ts('avz: ' + best_avz)
log_with_ts('request_type: ' + request_type)
# FSO--- start instances
instance_start(cn=cn,avz=best_avz,rt=request_type)
print("Done setting up instance")
log_with_ts("instance ready")
# t_init = time.time()
# # FSO--- run workflow and get cost of nodes back
# this is an example, adjust as needed
# tf = run_workflow(cn=cn,avz=best_avz)
    # # FSO--- get costs and log
    # costs = calc_approx_costs_running(cn=cn)
# log_with_ts('Ondemand costs: '+str(costs['ondemand'])+'USD')
# log_with_ts('Actual costs: '+str(costs['running'])+'USD')
# # FSO--- terminate instances
# uncomment if you want to terminate your instances automatically
# cloud_terminate(cn=cn)
# log_with_ts("all instances terminated")
# t_end = time.time()
# print "Time needed for init (min)", (t_init - t)/60.
# print "Time needed for workflow and terminate", (t_end - t_init)/60.
# log_with_ts("fabric end")
@task
def list_ubuntu_amis(regions=def_regions):
"""
    List all available ubuntu 16.04 AMIs in all configured regions
"""
for region in regions:
print("Region:", region)
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
imgs = cloud.get_all_images(owners=['099720109477'], filters={'architecture': 'x86_64', 'name': 'ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-*'})
for img in sorted(imgs, key=lambda v: v.name):
print(img.id,':',img.name)
print()
@task
def instance_start(cn=def_cn,
avz=def_default_avz,
rt=def_default_requesttype):
"""
Start and prepare instances
"""
# FSO---find already existing nodes
cloud = boto.ec2.connect_to_region(avz[:-1],profile_name=ec2Profile)
filters = {'tag:type': cn+'node'}
insta = cloud.get_all_instances(filters=filters)
# FSO---install each new node
print("Requesting new instance")
log_with_ts("Requesting new instance")
nodenumber = len(insta) + 1
node_install(cn=cn, avz=avz, rt=rt, idn=nodenumber)
log_with_ts('Finished installing instance')
cloud_list()
def print_instance(inst):
if inst.state != 'terminated':
cu_time = datetime.datetime.utcnow()
it = datetime.datetime.strptime(inst.launch_time,'%Y-%m-%dT%H:%M:%S.000Z')
else:
try:
cu_time = datetime.datetime.strptime(inst.tags.get('terminate_time'),'%Y-%m-%dT%H:%M:%S.%f')
except:
cu_time = datetime.datetime.utcnow()
it = datetime.datetime.strptime(inst.launch_time,'%Y-%m-%dT%H:%M:%S.000Z')
time_taken = cu_time - it
hours, rest = divmod(time_taken.total_seconds(),3600)
minutes, seconds = divmod(rest, 60)
print(inst.id, inst.instance_type, \
inst.tags.get('Name'), \
inst.tags.get('type'), \
inst.state, \
inst.dns_name, \
inst.private_ip_address, \
inst.private_dns_name, \
inst.tags.get('current_price'), \
inst.tags.get('billable_hours'), \
inst.tags.get('terminate_time'), \
inst.placement, \
'Subnet:%s' % inst.subnet_id, \
'Owner:%s' % inst.tags.get('node-owner'))
print("running for: ", hours,'h', minutes, "min")
def print_volume(vol):
info = ""
if 'vol-lifetime' in vol.tags:
info += '\tLifetime: ' + vol.tags['vol-lifetime']
if 'vol-user-name' in vol.tags:
info += '\tUservolume Name: ' + vol.tags['vol-user-name']
if 'vol-owner' in vol.tags:
info += '\tOwner: ' + vol.tags['vol-owner']
print(vol.id, "\t", vol.zone, "\t", vol.status, '\t', vol.size, info)
@task
def cloud_list(cn=def_cn,itype='all',regions=def_regions):
"""
List all ec2 instances.
"""
for region in regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
instances = cloud.get_all_instances()
vols = cloud.get_all_volumes()
print()
print("-------CURRENT RUNNING-----------")
print(" REGION:", region)
print()
print("Instances:")
print()
update_costs(cn=cn,regions=regions,itype=itype)
for reservation in instances:
for inst in reservation.instances:
print_instance(inst)
print()
print()
print("Volumes:")
print()
for vol in vols:
print_volume(vol)
def check_keypair(cloud, keynames):
# Check to see if specified keypair already exists.
# If we get an InvalidKeyPair.NotFound error back from EC2,
# it means that it doesn't exist and we need to create it.
key_dir = def_key_dir
try:
cloud.get_all_key_pairs(keynames=[keynames])[0]
except cloud.ResponseError as e:
if e.code == 'InvalidKeyPair.NotFound':
print('Creating keypair: %s' % keynames)
# Create an SSH key to use when logging into instances.
key = cloud.create_key_pair(keynames)
# Make sure the specified key_dir actually exists.
# If not, create it.
key_dir = os.path.expanduser(key_dir)
key_dir = os.path.expandvars(key_dir)
# if not os.path.isdir(key_dir):
# os.mkdir(key_dir, 0700)
#
# AWS will store the public key but the private key is
# generated and returned and needs to be stored locally.
# The save method will also chmod the file to protect
# your private key.
key.save(key_dir)
else:
raise
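# get_keypair_name builds a per-region key name with a random suffix that is
# persisted in <def_key_dir>/<keyn>_unique.txt, so repeated fabric runs keep
# reusing the same AWS keypair instead of creating a new one each time.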
def get_keypair_name(region):
key_dir = def_key_dir
key_dir = os.path.expanduser(key_dir)
key_dir = os.path.expandvars(key_dir)
un_file = os.path.join(key_dir, '%s_unique.txt' % keyn)
if os.path.exists(un_file):
with open(un_file, 'r') as un:
unique_part = un.read().strip()
else:
import uuid
unique_part = str(uuid.uuid4().get_hex().upper()[0:8])
with open(un_file, 'w') as un:
un.write(unique_part)
return keyn + '_' + region + '_' + unique_part
def get_user_persist_ebs(cloud, avz):
if home_volume_ebs_name is None:
return None
vols = cloud.get_all_volumes(filters={'tag:vol-user-name':home_volume_ebs_name, 'availability-zone': avz})
if len(vols) == 0:
print("Creating new EBS volume for user volume %s" % home_volume_ebs_name)
vol = cloud.create_volume(new_homefs_size_gb, avz)
vol.add_tag('vol-user-name', home_volume_ebs_name)
vol.add_tag('vol-lifetime', 'perm')
vol.add_tag('vol-owner', user_identifier)
else:
vol = vols[0]
print("Found existing volume %s for user volume %s!" % (vol.id, home_volume_ebs_name))
if vol.status != 'available':
print("But it's not available...")
return None
return vol
@task
def node_install(cn=def_cn,inst_type_idx=def_inst_type,idn=0,
avz=def_default_avz,rt=def_default_requesttype,
group_name='oggmssh',
ssh_port=22,
cidr='0.0.0.0/0'):
"""
Request and prepare single instance
"""
# FSO---connect
cloud = boto.ec2.connect_to_region(avz[:-1],profile_name=ec2Profile)
aminfo = cloud.get_image(def_ami[avz[:-1]])
vpcconn = VPCConnection(region=cloud.region, profile_name=ec2Profile)
try:
vpc_id, subnet_id = def_subnet[avz]
vpc = vpcconn.get_all_vpcs(vpc_ids=[vpc_id])[0]
except:
vpc_id = None
subnet_id = None
vpc = None
# FSO---check if node with same name already exists
if node_exists(cn + '_node' + str(idn)):
print("Node already exists")
sys.exit()
# Check if ssh keypair exists
key_name = get_keypair_name(avz[:-1])
check_keypair(cloud, key_name)
# FSO---create a bigger root device
dev_sda1 = EBSBlockDeviceType()
dev_sda1.size = rootfs_size_gb
dev_sda1.delete_on_termination = True
bdm = BlockDeviceMapping()
bdm['/dev/sda1'] = dev_sda1
dev_sdf_vol = get_user_persist_ebs(cloud, avz)
# Check to see if specified security group already exists.
# If we get an InvalidGroup.NotFound error back from EC2,
# it means that it doesn't exist and we need to create it.
try:
group = cloud.get_all_security_groups(groupnames=[group_name])[0]
except cloud.ResponseError as e:
if e.code == 'InvalidGroup.NotFound':
print('Creating Security Group: %s' % group_name)
# Create a security group to control access to instance via SSH.
group = cloud.create_security_group(group_name, 'A group that allows SSH access')
else:
raise
# Authorize all Intra-VPC traffic
if vpc is not None:
try:
group.authorize('-1', -1, -1, vpc.cidr_block)
except cloud.ResponseError as e:
if e.code != 'InvalidPermission.Duplicate':
raise
# Add a rule to the security group to authorize SSH traffic
# on the specified port.
try:
group.authorize('tcp', ssh_port, ssh_port, cidr)
except cloud.ResponseError as e:
if e.code == 'InvalidPermission.Duplicate':
print('Security Group: %s already authorized' % group_name)
else:
raise
log_with_ts("request node "+str(idn))
print('Reserving instance for node', aminfo.id, instance_infos[inst_type_idx]['type'], aminfo.name, aminfo.region)
if rt == 'spot':
print("placing node in ",avz)
requests = cloud.request_spot_instances(def_price,
def_ami[avz[:-1]],
count=1,
type='one-time',
security_group_ids=[group.id],
key_name=key_name,
placement=avz,
subnet_id=subnet_id,
ebs_optimized=True,
instance_type=instance_infos[inst_type_idx]['type'],
block_device_map=bdm)
req_ids = [request.id for request in requests]
instance_ids = wait_for_fulfillment(cloud,req_ids)
instances = cloud.get_only_instances(instance_ids=instance_ids)
node = instances[0]
log_with_ts("fullfilled spot node "+str(idn))
else:
print("placing node in ",avz)
reservation = cloud.run_instances(image_id=def_ami[avz[:-1]],
key_name=key_name,
placement=avz,
subnet_id=subnet_id,
security_group_ids=[group.id],
ebs_optimized=True,
instance_type=instance_infos[inst_type_idx]['type'],
block_device_map=bdm)
node = reservation.instances[0]
log_with_ts("fullfilled ondemand node "+str(idn))
time.sleep(2)
while not node.update() == 'running':
print('waiting for', cn, 'node', idn, 'to boot...')
time.sleep(5)
log_with_ts("booted node "+str(idn))
if dev_sdf_vol is not None:
cloud.attach_volume(dev_sdf_vol.id, node.id, "/dev/sdf")
node.add_tag('Name', cn+'_node'+str(idn))
node.add_tag('type', cn+'node')
node.add_tag('node-owner', user_identifier)
# FSO---set delete on termination flag to true for ebs block device
node.modify_attribute('blockDeviceMapping', { '/dev/sda1' : True })
# FSO--- test socket connect to ssh service
ssh_test(node)
log_with_ts("reachable node "+str(idn))
update_key_filename(node.region.name)
# Mount potential user volume
if dev_sdf_vol is not None:
use_user_volume(node.dns_name)
log_with_ts("finished node "+str(idn))
@task
def install_node_software(nn=''):
"""
Setup ready-for-use virtualenv for OGGM on instance
"""
inst = select_instance(nn)
install_node_apt('', inst)
install_node_pip('', inst)
run('echo Rebooting... && sleep 1 && sudo shutdown -r now')
@task
def install_node_pip(nn='', inst=None):
"""
Install oggm dependencies via pip
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
source ~/.virtenvrc &&
workon oggm_env &&
pip install --upgrade pip &&
pip install numpy &&
pip install scipy &&
pip install pandas shapely cython &&
pip install matplotlib &&
pip install gdal==1.11.2 --install-option="build_ext" --install-option="--include-dirs=/usr/include/gdal" &&
pip install fiona --install-option="build_ext" --install-option="--include-dirs=/usr/include/gdal" &&
pip install mpi4py &&
pip install pyproj rasterio Pillow geopandas netcdf4 scikit-image configobj joblib xarray boto3 motionless pytest progressbar2 &&
pip install git+https://github.com/fmaussion/salem.git &&
sed -i 's/^backend.*/backend : Agg/' "${WORKON_HOME}"/oggm_env/lib/python?.?/site-packages/matplotlib/mpl-data/matplotlibrc
""", pty=False)
@task
def install_node_apt(nn='', inst=None):
"""
Install required OGGM apt dependencies
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
export DEBIAN_FRONTEND=noninteractive &&
sudo apt-get -y update &&
sudo apt-get -y dist-upgrade &&
sudo apt-get -y install build-essential liblapack-dev gfortran libproj-dev gdal-bin libgdal-dev netcdf-bin ncview python3-netcdf4 tk-dev python3-tk python3-dev python3-numpy-dev ttf-bitstream-vera python3-pip git awscli virtualenvwrapper openmpi-bin libopenmpi-dev
""", pty=False)
copy_files = ['~/.aws/credentials', '~/.aws/config', '~/.screenrc', '~/.gitconfig']
for cf in copy_files:
if not os.path.exists(os.path.expanduser(cf)):
continue
run('mkdir -p %s' % os.path.dirname(cf))
put(cf, cf)
run("""
if [ -e /work/ubuntu ]; then
mkdir -p /work/ubuntu/.pyvirtualenvs
echo '# Virtual environment options' > ~/.virtenvrc
echo 'export WORKON_HOME="/work/ubuntu/.pyvirtualenvs"' >> ~/.virtenvrc
echo 'source /usr/share/virtualenvwrapper/virtualenvwrapper_lazy.sh' >> ~/.virtenvrc
else
mkdir -p ~/.pyvirtualenvs
echo '# Virtual environment options' > ~/.virtenvrc
echo 'export WORKON_HOME="${HOME}/.pyvirtualenvs"' >> ~/.virtenvrc
echo 'source /usr/share/virtualenvwrapper/virtualenvwrapper_lazy.sh' >> ~/.virtenvrc
fi
if ! grep virtenvrc ~/.bashrc; then
echo >> ~/.bashrc
echo 'source ~/.virtenvrc' >> ~/.bashrc
fi
""")
# bashrc is not sourced for non-interactive shells, so source the virtenvrc explicitly
run("""
export LC_ALL=C
source ~/.virtenvrc
if ! [ -d ${WORKON_HOME}/oggm_env ]; then
mkvirtualenv oggm_env -p /usr/bin/python3
fi
""")
@task
def install_node_nfs_master(nn='', inst=None):
"""
Setup the node to act as NFS server, serving /home and /work
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
export DEBIAN_FRONTEND=noninteractive &&
sudo apt-get -y install nfs-kernel-server &&
sudo mkdir -p /work/ubuntu /export/work /export/home &&
sudo chown ubuntu:ubuntu /work/ubuntu &&
echo '/export *(rw,fsid=0,insecure,no_subtree_check,async)' > /tmp/exports &&
echo '/export/work *(rw,nohide,insecure,no_subtree_check,async)' >> /tmp/exports &&
echo '/export/home *(rw,nohide,insecure,no_subtree_check,async)' >> /tmp/exports &&
sudo cp --no-preserve=all /tmp/exports /etc/exports &&
cp /etc/fstab /tmp/fstab &&
echo '/work /export/work none bind 0 0' >> /tmp/fstab &&
echo '/home /export/home none bind 0 0' >> /tmp/fstab &&
sudo cp --no-preserve=all /tmp/fstab /etc/fstab &&
sudo mount /export/work &&
sudo mount /export/home &&
sudo sed -i 's/NEED_SVCGSSD=.*/NEED_SVCGSSD="no"/' /etc/default/nfs-kernel-server &&
sudo service nfs-kernel-server restart &&
echo "%s slots=$(( $(grep '^processor' /proc/cpuinfo | tail -n1 | cut -d ':' -f2 | xargs) + 1 ))" > /work/ubuntu/mpi_hostfile &&
ssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa -N "" &&
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys &&
echo Done
""" % inst.private_ip_address)
@task
def install_node_nfs_client(master_ip, nn='', inst=None):
"""
Setup the node to act as NFS client on the given master_ip.
"""
if inst is None:
inst = select_instance(nn)
update_key_filename(inst.region.name)
env.host_string = inst.dns_name
env.user = 'ubuntu'
run("""
export LC_ALL=C &&
cd / &&
sudo mkdir /work &&
export DEBIAN_FRONTEND=noninteractive &&
sudo apt-get -y install nfs-common &&
cp /etc/fstab /tmp/fstab &&
echo '%s:/work /work nfs4 _netdev,auto 0 0' >> /tmp/fstab
echo '%s:/home /home nfs4 _netdev,auto 0 0' >> /tmp/fstab
sudo cp --no-preserve=all /tmp/fstab /etc/fstab &&
sudo mount /work &&
echo "%s slots=$(( $(grep '^processor' /proc/cpuinfo | tail -n1 | cut -d ':' -f2 | xargs) + 1 ))" >> /work/ubuntu/mpi_hostfile &&
echo Rebooting... && sleep 1 && sudo shutdown -r now
""" % (master_ip, master_ip, inst.private_ip_address))
@task
def terminate_perm_user_vol(name=home_volume_ebs_name,regions=def_regions):
"""
Terminate the permanent user volume
"""
print(regions)
for region in regions:
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
vols = cloud.get_all_volumes(filters={'tag:vol-user-name':name})
for vol in vols:
if vol.status == 'available':
print(vol.id,"\t", vol.status, "... deleted")
vol.delete()
else:
print(vol.id,"\t", vol.status, "... in use")
@task
def cloud_terminate(cn=def_cn,itype='all',regions=def_regions):
"""
Terminate all instances
"""
print(regions)
for region in regions:
print()
print("-------CURRENT RUNNING-----------")
print(" REGION:",region)
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
instances = cloud.get_all_instances()
vol = cloud.get_all_volumes()
update_costs(cn=cn,itype=itype)
for reservation in instances:
for inst in reservation.instances:
if inst.state != 'terminated':
if itype == 'all':
print('TERMINATING', inst.tags.get('Name'), inst.dns_name)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
elif itype == 'node' and inst.tags.get('type') == cn+'node':
print('TERMINATING', inst.tags.get('Name'), inst.dns_name)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
elif itype == 'master' and inst.tags.get('type') == cn+'master':
print('TERMINATING', inst.tags.get('Name'), inst.dns_name)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
for unattachedvol in vol:
if 'vol-lifetime' in unattachedvol.tags and unattachedvol.tags['vol-lifetime'] == 'perm':
print(unattachedvol.id,"\t", unattachedvol.status, "... is marked permanent")
elif unattachedvol.status == 'available':
print(unattachedvol.id,"\t", unattachedvol.status, "... deleted")
unattachedvol.delete()
else:
print(unattachedvol.id,"\t", unattachedvol.status, "... not deleted")
def select_instance(nn='', regions=def_regions):
"""
Prompt the user to select an instance
"""
instlist = list()
i = 0
for region in regions:
print()
print("-------CURRENT RUNNING-----------")
print(" REGION: ", region)
print()
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
reservations = cloud.get_all_instances()
for reserv in reservations:
for inst in reserv.instances:
if inst.state == 'terminated':
continue
print('Instance %s:' % i)
print_instance(inst)
print()
instlist.append(inst)
i += 1
print()
if nn == '' or nn is None:
nn = prompt('Instance index:')
nn = int(nn)
if nn < 0 or nn >= len(instlist):
print('Instance index out of range!')
sys.exit(-1)
return instlist[nn]
def select_volume(nn='', regions=def_regions):
"""
Prompt the user to select a volume
"""
vollist = list()
i = 0
for region in regions:
print()
print("-------CURRENT RUNNING-----------")
print(" REGION: ", region)
print()
cloud = boto.ec2.connect_to_region(region, profile_name=ec2Profile)
vols = cloud.get_all_volumes()
for vol in vols:
print("Volume %s:" % i)
print_volume(vol)
print()
vollist.append(vol)
i += 1
print()
if nn == '' or nn is None:
nn = prompt('Volume index:')
nn = int(nn)
if nn < 0 or nn >= len(vollist):
print('Volume index out of range!')
sys.exit(-1)
return vollist[nn]
@task
def terminate_one(regions=def_regions, nn=''):
"""
Terminate one instance
"""
print('Select instance to terminate:')
print()
inst = select_instance(nn, regions)
inst.add_tag('Name', 'term')
inst.add_tag('type', 'term')
inst.terminate()
stati2 = datetime.datetime.utcnow()
inst.add_tag('terminate_time', stati2.isoformat())
@task
def terminate_volume(regions=def_regions, nn=''):
"""
Terminate one volume
"""
print('Select volume to terminate:')
print()
vol = select_volume(nn, regions)
vol.delete()
@task
def calc_approx_costs_running(cn=def_cn,regions=def_regions,itype ='all'):
"""
calculate compute costs (network or storage not included)
only running instances are considered
From amazon: The instances will be billed at the then-current Spot Price regardless of the actual bid
"""
# FSO---update the price tags for each node
update_costs(cn=cn,regions=regions,itype=itype)
costs = dict()
costs['running'] = 0.0
costs['ondemand'] = 0.0
for region in regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
instances = cloud.get_all_instances()
print()
print("----------REGION:",region,itype,'-----------')
for reservation in instances:
for inst in reservation.instances:
if inst.state == 'running' and (inst.tags.get('type')==cn+itype or itype=='all'):
hours = float(inst.tags.get('billable_hours'))
cu_price = float(inst.tags.get('current_price'))
cu_ondemand_price = hours * find_inst_info(inst.instance_type)['price']
print()
print(inst.id, inst.instance_type, \
inst.tags.get('Name'), \
inst.dns_name,\
inst.tags.get('current_price')+'USD', \
inst.tags.get('billable_hours')+'h', \
inst.placement)
# print 'Billable hours ',hours
# print 'Current price', cu_price
# print 'Current ondemand price', cu_ondemand_price
costs['ondemand'] += cu_ondemand_price
if inst.spot_instance_request_id is None:
print('ondemand instance')
                        costs['running'] += cu_ondemand_price  # accumulate, like the spot branch below
else:
print('spot instance')
costs['running'] += cu_price
print()
print('Total ondemand: ', costs['ondemand'])
print('Total of running: ' , costs['running'])
return costs
@task
def connect(nn='', user='ubuntu'):
"""
SSH to cloud instances
"""
inst = select_instance(nn)
update_key_filename(inst.region.name)
print('ssh', '-i', os.path.expanduser(env.key_filename), '%s@%s' % (user, inst.dns_name))
print('...')
print()
os.execlp('ssh', 'ssh', '-i', os.path.expanduser(env.key_filename), '%s@%s' % (user, inst.dns_name))
def get_cheapest_availability_zone(ondemand_price):
"""
get the cheapest avz and check if below ondemand_price
    BEWARE: this does not necessarily pick the cheapest avz right now, but the one
    with the lowest maximum price over the recent price history (a 3-hour window
    below). Hopefully that's the most stable choice price-wise.
"""
avz_prices_nodes = defaultdict(list)
for region in def_regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
stati2 = datetime.datetime.utcnow()
stati1 = stati2 - datetime.timedelta(hours=3)
prices = cloud.get_spot_price_history(
# instance_type=def_itype,
# instance_type=['m1.small','m1.medium'],
start_time=stati1.isoformat(),
end_time= stati2.isoformat(),
product_description='Linux/UNIX')
# FSO---split in availability_zones
for price in prices:
if price.instance_type == instance_infos[def_inst_type]['type']:
avz_prices_nodes[str(price.availability_zone)].append(price)
# FSO---remove us-east-1c as access is constrained
    try:
        del avz_prices_nodes['us-east-1c']
    except KeyError:
        print("no us-east-1c prices to remove")
maxprice_nodes = dict()
for key in avz_prices_nodes:
allpr_nodes = [k.price for k in avz_prices_nodes[key]]
maxprice_nodes[key] = max(allpr_nodes)
    best_avz_nodes = min(maxprice_nodes, key=maxprice_nodes.get) # gets just the first if several avz's are the same
print("Cheapest nodes: ", best_avz_nodes, maxprice_nodes[best_avz_nodes])
print("Ondemand nodes (EU):", ondemand_price)
if maxprice_nodes[best_avz_nodes] < ondemand_price:
return best_avz_nodes,'spot'
else:
return def_default_avz,'ondemand'
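# Example (hypothetical session, commented out): pick the cheapest AZ given an
# ondemand reference price of 0.10 USD/h, and only bid spot if it is cheaper.
#
#   avz, mode = get_cheapest_availability_zone(0.10)
#   if mode == 'spot':
#       print('bidding in', avz)
#   else:
#       print('falling back to ondemand in', avz)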
def wait_for_fulfillment(cloud,pending_ids):
"""
Wait for fulfillment of spot instance requests
"""
instances = list()
while not len(pending_ids) == 0:
pending = cloud.get_all_spot_instance_requests(pending_ids)
for request in pending:
if request.status.code == 'fulfilled':
pending_ids.pop(pending_ids.index(request.id))
print("spot request `{}` fulfilled!".format(request.id))
#print request.__dict__
instances.append(request.instance_id)
cloud.cancel_spot_instance_requests(request.id)
elif request.state == 'cancelled':
pending_ids.pop(pending_ids.index(request.id))
print("spot request `{}` cancelled!".format(request.id))
else:
print("waiting on `{}`".format(request.id))
time.sleep(5)
print("all spots fulfilled!")
return instances
def update_costs(cn=def_cn,itype='all',regions=def_regions):
"""
Updates the price tags of all running instances
"""
for region in regions:
cloud = boto.ec2.connect_to_region(region,profile_name=ec2Profile)
instances = cloud.get_all_instances()
for reservation in instances:
for inst in reservation.instances:
total_price = 0.0
if inst.state != 'terminated':
cu_time = datetime.datetime.utcnow()
it = datetime.datetime.strptime(inst.launch_time,'%Y-%m-%dT%H:%M:%S.000Z')
time_taken = cu_time - it
hours = int(math.ceil(time_taken.total_seconds()/3600.))
# FSO---for spot instances
if inst.spot_instance_request_id is not None:
# FSO---loop through hours. spot instances are billed according to the price at each full hour!
for i in range(hours):
price = cloud.get_spot_price_history(instance_type=inst.instance_type,
start_time = it.isoformat(),
end_time= it.isoformat(),
product_description='Linux/UNIX',
availability_zone=inst.placement)
# print "Hour: ", it, "price=",price
it = it + datetime.timedelta(hours=1)
total_price = total_price + price[0].price
# FSO---ondemand instances
else:
total_price = hours * find_inst_info(inst.instance_type)['price']
inst.add_tag('current_price', total_price)
inst.add_tag('billable_hours', hours)
def log_with_ts(logtext="no text given",lf=def_logfile):
"""
Helper function to write logs with timestamps
"""
# logtime = time.time()
# st = datetime.datetime.fromtimestamp(logtime).strftime('%Y-%m-%d %H:%M:%S')
st = str(datetime.datetime.utcnow())
with open(lf, "a+") as myfile:
myfile.writelines('['+st+' UTC] '+ logtext+'\n')
def spot_price(cloud,launch_time,inst_type):
"""
    Helper function to get spot prices
"""
    prices = dict()
    # Query the spot price history at launch_time for each availability zone of
    # interest. All lookups are guarded against empty results (0.0 is returned
    # for zones with no price history).
    def price_at(conn, avz):
        hist = conn.get_spot_price_history(instance_type=inst_type,
                                           start_time=launch_time,
                                           end_time=launch_time,
                                           product_description='Linux/UNIX',
                                           availability_zone=avz)
        return hist[0].price if len(hist) > 0 else 0.0
    prices['a'] = price_at(cloud, 'eu-west-1a')
    prices['b'] = price_at(cloud, 'eu-west-1b')
    prices['c'] = price_at(cloud, 'eu-west-1c')
    cloudus = boto.ec2.connect_to_region('us-east-1', profile_name=ec2Profile)
    prices['usc'] = price_at(cloudus, 'us-east-1c')
    prices['usb'] = price_at(cloudus, 'us-east-1b')
    return prices
def node_find(node,avz=def_default_avz):
"""
Return the instance object of a given node hostname.
"""
    cloud = boto.ec2.connect_to_region(avz[:-1], profile_name=ec2Profile)
instances = cloud.get_all_instances()
for reservation in instances:
for inst in reservation.instances:
if inst.tags.get('Name') == node and inst.state == 'running':
print('found', inst.tags.get('Name'), inst.dns_name)
return inst
def node_exists(node,avz=def_default_avz):
"""
checks if node with given name exists
"""
cloud = boto.ec2.connect_to_region(avz[:-1],profile_name=ec2Profile)
instances = cloud.get_all_instances()
for reservation in instances:
for inst in reservation.instances:
if inst.tags.get('Name') == node and inst.state == 'running':
print('found', inst.tags.get('Name'), inst.dns_name)
return True
return False
def enable_root(host):
"""
Enable root access on instance
"""
env.host_string = host
env.user = 'ubuntu'
run("sudo perl -i -pe 's/disable_root: 1/disable_root: 0/' /etc/cloud/cloud.cfg")
run("sudo perl -i -pe 's/#PermitRootLogin .*/PermitRootLogin without-password/' /etc/ssh/sshd_config")
run('sudo cp -f /home/ubuntu/.ssh/authorized_keys /root/.ssh/authorized_keys', shell=True, pty=True)
run("sudo reload ssh")
def use_user_volume(host):
"""
Setup and mount user /work volume
"""
env.host_string = host
env.user = 'ubuntu'
run("test -e /dev/xvdf1 || ( sudo sgdisk -o -g -n 1:2048:0 /dev/xvdf && sudo mkfs.ext4 /dev/xvdf1 )")
run("sudo mkdir /work")
run("sudo mount -o defaults,discard /dev/xvdf1 /work")
run("echo \"/dev/xvdf1 /work ext4 defaults,discard 0 0\" | sudo tee -a /etc/fstab")
run("test -e /work/ubuntu || ( sudo mkdir /work/ubuntu && sudo chown ubuntu:ubuntu /work/ubuntu )")
def ssh_test(inst):
"""
checks for ssh connectability
"""
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(4)
sock.connect((inst.dns_name, 22))
break
        except socket.error:
print('waiting for ssh daemon...')
time.sleep(5)
finally:
sock.close()
|
jlandmann/oggm
|
deployment/fabfile.py
|
Python
|
gpl-3.0
| 41,370
|
[
"NetCDF"
] |
8f9db7ca07a51372a6a6023b4532212da7ec89767ac1a2478a6e0483ded75fd7
|
from pymol import cmd
from pymol.cgo import *
# This example shows one way to create a plane behind the current
# molecule.
#
# Note that because PyMOL's ray-tracer doesn't currently support
# perspective, the plane will not look right if the edges are
# showing. Thus, it is best to zoom the image so that the edges can't
# be seen and that the plane appears infinite
#
# To use this script, setup your molecule and then "run cgo_plane.py".
# This will create a plane in space about 80% of the way to the far
# clipping plane. You can then rotate the camera around to get the
# desired shadowing.
#
# If the plane is too close to the molecule, move the rear clipping
# plane back and then re-run cgo_plane.py.
#
# NOTE that once the plane is created, there is no easy way to move it
# (other than recreating it). However, you can move you molecule
# relative to the plane using PyMOL's molecular editing features.
#
# (Remember, you can't click on cartoons, but you can click on ribbons).
#
# Good luck,
# Warren
view = cmd.get_view()
# locate plane most of the way to the rear clipping plane
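# (assuming the usual cmd.get_view layout: view[9:12] is the camera translation,
# and view[15]/view[16] are the front/rear clipping plane distances, so the
# weighted average below lands ~5/6 of the way toward the rear plane)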
plane_z = - (view[11] + (view[15]+5*view[16])/6.0)
# choose the size of the plane
plane_size = abs(view[11])/3.0
obj = []
# now create a plane in camera space
plane = [
[ -plane_size, plane_size, plane_z ],
[ plane_size, plane_size, plane_z ],
[ -plane_size, -plane_size, plane_z ],
[ plane_size, -plane_size, plane_z ]]
normal = [ 0.0, 0.0, 1.0 ]
# then transform plane coordinates into model space
def xform(p, v=view):
    return [v[0]*p[0] + v[1]*p[1] + v[2]*p[2],
            v[3]*p[0] + v[4]*p[1] + v[5]*p[2],
            v[6]*p[0] + v[7]*p[1] + v[8]*p[2]]
plane = [xform(p) for p in plane]
normal = xform(normal)
# and position relative to the camera
plane = [[p[0] + view[9] + view[12],
          p[1] + view[10] + view[13],
          p[2] + view[14]] for p in plane]
# set color
obj.extend( [ COLOR, 0.8, 0.8, 0.8 ] ) # greyish
# begin triangle strip
obj.extend( [ BEGIN, TRIANGLE_STRIP ] )
# set normal
obj.append( NORMAL )
obj.extend( normal )
# draw the plane
for a in plane:
obj.append( VERTEX)
obj.extend(a)
obj.append( END )
# delete existing object (if any)
cmd.delete("cgo_plane")
# now load the new object without zooming
auto_zoom = cmd.get('auto_zoom')
cmd.set('auto_zoom', 0, quiet=1)
cmd.load_cgo(obj,'cgo_plane')
cmd.set('auto_zoom', auto_zoom, quiet=1)
|
gratefulfrog/lib
|
python/pymol/pymol_path/examples/devel/cgo_plane.py
|
Python
|
gpl-2.0
| 2,566
|
[
"PyMOL"
] |
d537117ee14f83a66ab500794a1816de86772b995002c72c723b307f5b2bec67
|
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. module:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are noted.
Error handling
==============
Errors are handled by returning nans, or other appropriate values.
Some of the special function routines will emit warnings when an error
occurs. By default this is disabled. To enable such messages use
``errprint(1)``, and to disable such messages use ``errprint(0)``.
Example:
    >>> print(scipy.special.bdtr(-1, 10, 0.3))
    >>> scipy.special.errprint(1)
    >>> print(scipy.special.bdtr(-1, 10, 0.3))
.. autosummary::
:toctree: generated/
errprint
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions
ai_zeros -- [+]Zeros of Airy functions Ai(x) and Ai'(x)
bi_zeros -- [+]Zeros of Airy functions Bi(x) and Bi'(x)
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- ellipkm1(x) == ellipk(1 - x)
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jn -- Bessel function of integer order and real argument.
jv -- Bessel function of real-valued order and complex argument.
jve -- Exponentially scaled Bessel function.
yn -- Bessel function of second kind (integer order).
yv -- Bessel function of the second kind (real-valued order).
yve -- Exponentially scaled Bessel function of the second kind.
kn -- Modified Bessel function of the second kind (integer order).
kv -- Modified Bessel function of the second kind (real order).
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function.
ive -- Exponentially scaled modified Bessel function.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Sequence of lambda functions with arbitrary order v.
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Zeros of integer-order Bessel functions and derivatives sorted in order.
jnyn_zeros -- [+]Zeros of integer-order Bessel functions and derivatives as separate arrays.
jn_zeros -- [+]Zeros of Jn(x)
jnp_zeros -- [+]Zeros of Jn'(x)
yn_zeros -- [+]Zeros of Yn(x)
ynp_zeros -- [+]Zeros of Yn'(x)
y0_zeros -- [+]Complex zeros: Y0(z0)=0 and values of Y0'(z0)
y1_zeros -- [+]Complex zeros: Y1(z1)=0 and values of Y1'(z1)
y1p_zeros -- [+]Complex zeros of Y1'(z1')=0 and values of Y1(z1')
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of order 0.
j1 -- Bessel function of order 1.
y0 -- Bessel function of second kind of order 0.
y1 -- Bessel function of second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0.
k0e -- Exponentially scaled modified Bessel function of the second kind of order 0.
k1 -- Modified Bessel function of the second kind of order 1.
k1e -- Exponentially scaled modified Bessel function of the second kind of order 1.
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Basic integrals of j0 and y0 from 0 to x.
it2j0y0 -- Integrals of (1-j0(t))/t from 0 to x and y0(t)/t from x to inf.
iti0k0 -- Basic integrals of i0 and k0 from 0 to x.
it2i0k0 -- Integrals of (i0(t)-1)/t from 0 to x and k0(t)/t from x to inf.
   besselpoly -- Integral of a Bessel function: Jv(2*a*x) * x**lambda from x=0 to 1.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Nth derivative of Jv(v,z)
yvp -- Nth derivative of Yv(v,z)
kvp -- Nth derivative of Kv(v,z)
ivp -- Nth derivative of Iv(v,z)
h1vp -- Nth derivative of H1v(v,z)
h2vp -- Nth derivative of H2v(v,z)
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
sph_jn -- [+]Sequence of spherical Bessel functions, jn(z)
sph_yn -- [+]Sequence of spherical Bessel functions, yn(z)
sph_jnyn -- [+]Sequence of spherical Bessel functions, jn(z) and yn(z)
sph_in -- [+]Sequence of spherical Bessel functions, in(z)
sph_kn -- [+]Sequence of spherical Bessel functions, kn(z)
sph_inkn -- [+]Sequence of spherical Bessel functions, in(z) and kn(z)
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Sequence of Ricatti-Bessel functions of first kind.
riccati_yn -- [+]Sequence of Ricatti-Bessel functions of second kind.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function --- Hv(x)
modstruve -- Modified Struve function --- Lv(x)
itstruve0 -- Integral of H0(t) from 0 to x
it2struve0 -- Integral of H0(t)/t from x to Inf.
itmodstruve0 -- Integral of L0(t) from 0 to x.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
   bdtr -- Sum of terms 0 through k of the binomial pdf.
bdtrc -- Sum of terms k+1 through n of the binomial pdf.
bdtri -- Inverse of bdtr
btdtr -- Integral from 0 to x of beta pdf.
btdtri -- Quantiles of beta distribution
fdtr -- Integral from 0 to x of F pdf.
fdtrc -- Integral from x to infinity under F pdf.
fdtri -- Inverse of fdtrc
gdtr -- Integral from 0 to x of gamma pdf.
gdtrc -- Integral from x to infinity under gamma pdf.
gdtria --
gdtrib --
gdtrix --
nbdtr -- Sum of terms 0 through k of the negative binomial pdf.
nbdtrc -- Sum of terms k+1 to infinity under negative binomial pdf.
nbdtri -- Inverse of nbdtr
pdtr -- Sum of terms 0 through k of the Poisson pdf.
pdtrc -- Sum of terms k+1 to infinity of the Poisson pdf.
pdtri -- Inverse of pdtr
stdtr -- Integral from -infinity to t of the Student-t pdf.
stdtridf --
stdtrit --
chdtr -- Integral from 0 to x of the Chi-square pdf.
   chdtrc -- Integral from x to infinity of Chi-square pdf.
chdtri -- Inverse of chdtrc.
ndtr -- Integral from -infinity to x of standard normal pdf
ndtri -- Inverse of ndtr (quantiles)
smirnov -- Kolmogorov-Smirnov complementary CDF for one-sided test statistic (Dn+ or Dn-)
smirnovi -- Inverse of smirnov.
kolmogorov -- The complementary CDF of the (scaled) two-sided test statistic (Kn*) valid for large n.
kolmogi -- Inverse of kolmogorov
tklmbda -- Tukey-Lambda CDF
logit --
expit --
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Log of the absolute value of the gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Incomplete gamma integral.
gammaincinv -- Inverse of gammainc.
gammaincc -- Complemented incomplete gamma integral.
gammainccinv -- Inverse of gammaincc.
beta -- Beta function.
betaln -- Log of the absolute value of the beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse of betainc.
psi -- Logarithmic derivative of the gamma function.
rgamma -- One divided by the gamma function.
polygamma -- Nth derivative of psi function.
multigammaln
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Error function.
erfc -- Complemented error function (1- erf(x))
erfcx -- Scaled complemented error function exp(x**2)*erfc(x)
erfi -- Imaginary error function, -i erf(i x)
erfinv -- Inverse of error function
erfcinv -- Inverse of erfc
   wofz -- Faddeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sine and cosine integrals.
fresnel_zeros -- Complex zeros of both Fresnel integrals
modfresnelp -- Modified Fresnel integrals F_+(x) and K_+(x)
modfresnelm -- Modified Fresnel integrals F_-(x) and K_-(x)
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Complex zeros of erf(z)
fresnelc_zeros -- [+]Complex zeros of Fresnel cosine integrals
fresnels_zeros -- [+]Complex zeros of Fresnel sine integrals
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre Function of arbitrary non-negative degree v.
sph_harm -- Spherical Harmonics (complex-valued) Y^m_n(theta,phi)
These are not universal functions:
.. autosummary::
:toctree: generated/
lpn -- [+]Legendre Functions (polynomials) of the first kind
lqn -- [+]Legendre Functions of the second kind.
lpmn -- [+]Associated Legendre Function of the first kind.
lqmn -- [+]Associated Legendre Function of the second kind.
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
eval_legendre
eval_chebyt
eval_chebyu
eval_chebyc
eval_chebys
eval_jacobi
eval_laguerre
eval_genlaguerre
eval_hermite
eval_hermitenorm
eval_gegenbauer
eval_sh_legendre
eval_sh_chebyt
eval_sh_chebyu
eval_sh_jacobi
The functions below, in turn, return :ref:`orthopoly1d` objects, which
function similarly to :ref:`numpy.poly1d`. The :ref:`orthopoly1d`
class also has an attribute ``weights`` which returns the roots, weights,
and total weights for the appropriate form of Gaussian quadrature.
These are returned in an ``n x 3`` array with roots in the first column,
weights in the second column, and total weights in the final column.
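For example, a minimal sketch (assuming the ``weights`` attribute
described above)::

    >>> from scipy.special import legendre
    >>> P3 = legendre(3)    # orthopoly1d for the degree-3 Legendre polynomial
    >>> P3.weights.shape    # 3 Gauss-Legendre nodes x (root, weight, total weight)
    (3, 3)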
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial P_n(x) (lpn -- for function).
chebyt -- [+]Chebyshev polynomial T_n(x)
chebyu -- [+]Chebyshev polynomial U_n(x)
chebyc -- [+]Chebyshev polynomial C_n(x)
chebys -- [+]Chebyshev polynomial S_n(x)
jacobi -- [+]Jacobi polynomial P^(alpha,beta)_n(x)
laguerre -- [+]Laguerre polynomial, L_n(x)
genlaguerre -- [+]Generalized (Associated) Laguerre polynomial, L^alpha_n(x)
hermite -- [+]Hermite polynomial H_n(x)
hermitenorm -- [+]Normalized Hermite polynomial, He_n(x)
gegenbauer -- [+]Gegenbauer (Ultraspherical) polynomials, C^(alpha)_n(x)
sh_legendre -- [+]shifted Legendre polynomial, P*_n(x)
sh_chebyt -- [+]shifted Chebyshev polynomial, T*_n(x)
sh_chebyu -- [+]shifted Chebyshev polynomial, U*_n(x)
sh_jacobi -- [+]shifted Jacobi polynomial, J*_n(x) = G^(p,q)_n(x)
.. warning::
Large-order polynomials obtained from these functions
are numerically unstable.
``orthopoly1d`` objects are converted to ``poly1d``, when doing
arithmetic. ``numpy.poly1d`` works in power basis and cannot
represent high-order polynomials accurately, which can cause
significant inaccuracy.
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function (2F1)
hyp1f1 -- Confluent hypergeometric function (1F1)
hyperu -- Confluent hypergeometric function (U)
hyp0f1 -- Confluent hypergeometric limit function (0F1)
hyp2f0 -- Hypergeometric function (2F0)
hyp1f2 -- Hypergeometric function (1F2)
hyp3f0 -- Hypergeometric function (3F0)
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function Dv(x) and derivative.
pbvv -- Parabolic cylinder function Vv(x) and derivative.
pbwa -- Parabolic cylinder function W(a,x) and derivative.
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Sequence of parabolic cylinder functions Dv(x)
pbvv_seq -- [+]Sequence of parabolic cylinder functions Vv(x)
pbdn_seq -- [+]Sequence of parabolic cylinder functions Dn(z), complex z
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic values for even solution (ce_m)
mathieu_b -- Characteristic values for odd solution (se_m)
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]sequence of expansion coefficients for even solution
mathieu_odd_coef -- [+]sequence of expansion coefficients for odd solution
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function
mathieu_sem -- Odd Mathieu function
mathieu_modcem1 -- Even modified Mathieu function of the first kind
mathieu_modcem2 -- Even modified Mathieu function of the second kind
mathieu_modsem1 -- Odd modified Mathieu function of the first kind
mathieu_modsem2 -- Odd modified Mathieu function of the second kind
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind
pro_rad1 -- Prolate spheroidal radial function of the first kind
pro_rad2 -- Prolate spheroidal radial function of the second kind
obl_ang1 -- Oblate spheroidal angular function of the first kind
obl_rad1 -- Oblate spheroidal radial function of the first kind
obl_rad2 -- Oblate spheroidal radial function of the second kind
pro_cv -- Compute characteristic value for prolate functions
obl_cv -- Compute characteristic value for oblate functions
pro_cv_seq -- Compute sequence of prolate characteristic values
obl_cv_seq -- Compute sequence of oblate characteristic values
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function of the first kind
pro_rad1_cv -- Prolate spheroidal radial function of the first kind
pro_rad2_cv -- Prolate spheroidal radial function of the second kind
obl_ang1_cv -- Oblate spheroidal angular function of the first kind
obl_rad1_cv -- Oblate spheroidal radial function of the first kind
obl_rad2_cv -- Oblate spheroidal radial function of the second kind
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- All Kelvin functions (order 0) and derivatives.
kelvin_zeros -- [+]Zeros of All Kelvin functions (order 0) and derivatives
ber -- Kelvin function ber x
bei -- Kelvin function bei x
berp -- Derivative of Kelvin function ber x
beip -- Derivative of Kelvin function bei x
ker -- Kelvin function ker x
kei -- Kelvin function kei x
kerp -- Derivative of Kelvin function ker x
keip -- Derivative of Kelvin function kei x
These are not universal functions:
.. autosummary::
:toctree: generated/
   ber_zeros -- [+]Zeros of Kelvin function ber x
   bei_zeros -- [+]Zeros of Kelvin function bei x
berp_zeros -- [+]Zeros of derivative of Kelvin function ber x
beip_zeros -- [+]Zeros of derivative of Kelvin function bei x
   ker_zeros -- [+]Zeros of Kelvin function ker x
   kei_zeros -- [+]Zeros of Kelvin function kei x
kerp_zeros -- [+]Zeros of derivative of Kelvin function ker x
keip_zeros -- [+]Zeros of derivative of Kelvin function kei x
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
binom -- Binomial coefficient.
expn -- Exponential integral.
exp1 -- Exponential integral of order 1 (for complex argument)
expi -- Another exponential integral -- Ei(x)
shichi -- Hyperbolic sine and cosine integrals.
sici -- Integral of the sinc and "cosinc" functions.
spence -- Dilogarithm integral.
lambertw -- Lambert W function
zeta -- Riemann zeta function of two arguments.
   zetac -- Standard Riemann zeta function minus 1 (zeta(x) - 1).
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root.
exp10 -- 10 raised to the x power.
exp2 -- 2 raised to the x power.
radian -- radian angle given degrees, minutes, and seconds.
cosdg -- cosine of the angle given in degrees.
sindg -- sine of the angle given in degrees.
tandg -- tangent of the angle given in degrees.
cotdg -- cotangent of the angle given in degrees.
log1p -- log(1+x)
expm1 -- exp(x)-1
cosm1 -- cos(x)-1
round -- round the argument to the nearest integer. If argument ends in 0.5 exactly, pick the nearest even integer.
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import
from ._ufuncs import *
from ._ufuncs_cxx import *
from .basic import *
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from .lambertw import lambertw
__all__ = [s for s in dir() if not s.startswith('_')]
from numpy.dual import register_func
register_func('i0',i0)
del register_func
from numpy.testing import Tester
test = Tester().test
|
Universal-Model-Converter/UMC3.0a
|
data/Python/x86/Lib/site-packages/scipy/special/__init__.py
|
Python
|
mit
| 19,011
|
[
"Gaussian"
] |
0fc37f3135fc5924bf5bcbddcf7be7a98d2fc0aba5e1f516d133b1988cac71f0
|
# -*- coding: utf-8 -*-
"""
ORCA Open Remote Control Application
Copyright (C) 2013-2020 Carsten Thielepape
Please contact me by : http://www.orca-remote.org/
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Dict
from typing import List
from typing import Union
from typing import Any
from typing import Tuple
from typing import Optional
from copy import copy
from xml.etree.ElementTree import Element
from kivy.logger import Logger
from kivy.clock import Clock
from ORCA import Globals as Globals
from ORCA.Action import GetActionID
from ORCA.Action import cAction
from ORCA.interfaces.BaseTrigger import cBaseTrigger
from ORCA.BaseSettings import cBaseSettings
from ORCA.ui.ShowErrorPopUp import ShowErrorPopUp
from ORCA.utils.CachedFile import CachedFile
from ORCA.utils.TypeConvert import ToBool
from ORCA.utils.XML import Orca_FromString, Orca_include, orca_et_loader
from ORCA.vars.Access import SetVar
from ORCA.utils.FileName import cFileName
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ORCA.interfaces.BaseInterface import cBaseInterFace
from ORCA.interfaces.InterfaceResultParser import cInterFaceResultParser
else:
from typing import TypeVar
cBaseInterFace = TypeVar("cBaseInterFace")
cInterFaceResultParser = TypeVar("cInterFaceResultParser")
__all__ = ['cBaseInterFaceSettings']
class cBaseInterFaceSettings(cBaseSettings):
""" A base class for the interfacesettings """
def __init__(self, oInterFace):
# some default settings, which should be there even if not configered by the interface
# use the exact spelling as in the settings json
super().__init__(oInterFace)
self.oInterFace:cBaseInterFace = oInterFace
self.uConfigName:str = "DEVICE_DEFAULT"
self.uType:str = "interface"
self.aIniSettings.bDisableInterFaceOnError = False
self.aIniSettings.bDisconnectInterFaceOnSleep = True
self.aIniSettings.fTimeOut = 1.0
self.aIniSettings.iTimeToClose = -1
self.aIniSettings.uFNCodeset = u''
self.aIniSettings.uHost = u'192.168.1.2'
self.aIniSettings.uParseResultOption = u''
self.aIniSettings.uParseResultTokenizeString = u''
self.aIniSettings.uParseResultFlags = u''
self.aIniSettings.uPort = u'80'
self.aIniSettings.uResultEndString = u'\\n'
self.aIniSettings.uDiscoverScriptName = u''
self.bInConnect:bool = False
self.bIsConnected:bool = False
self.bResumeConnection:bool = False
self.dTriggers:Dict[str,cBaseTrigger] = {}
self.iDiscoverCount:int = 0
self.iMaxDiscoverCount:int = 1
self.oAction:Union[cAction,None] = None
self.oLastAction:Union[cAction,None] = None
self.dStandardActions:Dict[str,Union[cAction,None]] = {"ping":None,"defaultresponse":None}
self.bStandardActionsLoaded:bool = False
self.oResultParser:Union[cInterFaceResultParser,None] = None
self.dNewTriggers:Dict[str,List[cBaseTrigger]] = {}
def ReadStandardActions(self) -> None:
""" Reads the standard codeset codes eg ping """
if not self.bStandardActionsLoaded:
for uKey in self.dStandardActions:
oAction:cAction = self.ReadStandardActions_sub(self.dStandardActions[uKey],uKey)
if oAction:
self.dStandardActions[uKey]=oAction
self.bStandardActionsLoaded=True
def ReadStandardActions_sub(self,oTargetAction:cAction,uActionName:str) -> Union[cAction,None]:
"""
Sub Routine to read the standard actions
        :param cAction|None oTargetAction: The action object for a standard action; should be None on input
        :param str uActionName: The action name
"""
if oTargetAction is None or oTargetAction==u'':
aActions:List[cAction] = Globals.oActions.GetActionList(uActionName = self.MakeLocalActionName(uActionName), bNoCopy=False)
if aActions is not None:
if len(aActions)==1:
return aActions[0]
else:
Logger.error("StandardCodesetCodes can''t be multiline:"+uActionName)
return None
def ExecuteStandardAction(self,uActionName:str) -> int:
"""
Executes as standard action
:param string uActionName:
:return: The return code of the action
"""
aActions:List[cAction]=Globals.oActions.GetActionList(uActionName = self.MakeLocalActionName(uActionName), bNoCopy = False)
if aActions is not None:
return Globals.oEvents.ExecuteActionsNewQueue(aActions, None, True)
else:
return 0
# noinspection PyUnusedLocal
def Discover(self,**kwargs) -> bool:
""" helper for the discover scripts"""
if self.aIniSettings.uHost!="discover":
return True
self.iDiscoverCount += 1
if self.iDiscoverCount > self.iMaxDiscoverCount:
return False
self.ShowDebug(uMsg=u'Try to discover device')
uDiscoverScriptName:str = self.aIniSettings.uDiscoverScriptName
dParams:Dict[str,Any] = {}
for uKey in self.aIniSettings:
if uKey[1:].startswith(uDiscoverScriptName.upper()):
uParamKey=uKey[len(uDiscoverScriptName)+2:]
dParams[uParamKey]=self.aIniSettings[uKey]
dResult:Dict = Globals.oScripts.RunScript(uDiscoverScriptName, **dParams)
oException:Exception = dResult.get('Exception','')
if oException is None or oException=='':
for uKey in dResult:
if uKey != Exception:
self.aIniSettings[uKey]=dResult[uKey]
if uKey == 'Host' and dResult.get("Hostname","")=="":
if self.aIniSettings.bSaveDiscoveredIP:
self.oInterFace.oObjectConfig.oConfigParser.set(self.uSection, u'olddiscoveredip', self.aIniSettings.uHost)
self.oInterFace.oObjectConfig.oConfigParser.write()
if uKey == 'Hostname' and dResult.get("Hostname","")!="":
self.aIniSettings["Host"]=dResult[uKey]
if self.aIniSettings.bSaveDiscoveredIP:
self.oInterFace.oObjectConfig.oConfigParser.set(self.uSection, u'olddiscoveredip', self.aIniSettings.uHostname)
self.oInterFace.oObjectConfig.oConfigParser.write()
return True
else:
            self.ShowError(uMsg=u"Can't discover device:" + self.oInterFace.oObjectConfig.oFnConfig.string + u' Section:' + self.uSection, oException=oException)
return False
def ReadCodeset(self) -> None:
""" reads the codeset file """
oTmpCodeSetAction:cAction
aTmpCodeSetAction:List[cAction]
oCodesetFileName:cFileName = self.oInterFace.FindCodesetFile(self.aIniSettings.uFNCodeset)
if oCodesetFileName is None:
self.ShowDebug(uMsg=u'Cannot Read Codeset (Not Found):' + self.aIniSettings.uFNCodeset)
return
        self.ShowDebug(uMsg=u'Read Codeset:' + oCodesetFileName.string)
if oCodesetFileName.Exists():
uET_Data:str = CachedFile(oFileName=oCodesetFileName)
oET_Root:Element = Orca_FromString(uET_Data=uET_Data,oDef=None,uFileName=oCodesetFileName.string)
Orca_include(oET_Root,orca_et_loader)
dTmpCodeSetActions:Dict[str,List[cAction]] = {}
Globals.oActions.LoadActionsSub(oET_Root=oET_Root ,uSegmentTag=u'',uListTag=u'action', dTargetDic=dTmpCodeSetActions,uFileName=oCodesetFileName.string)
# replacing alias
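        # e.g. (hypothetical codeset entry): an action with type="alias" and
        # cmd="power_toggle" is replaced by a copy of the single "power_toggle"
        # action, or rewritten into a "call" action when the alias target is a
        # multi-line action list; the loop repeats until no alias remains.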
bDoItAgain:bool = True
uKey:str = u''
uAlias:str = u''
try:
# replace all alias
while bDoItAgain:
bDoItAgain=False
try:
for uKey in dTmpCodeSetActions:
iPos:int = -1
for oTmpCodeSetAction in dTmpCodeSetActions[uKey]:
iPos += 1
if oTmpCodeSetAction.dActionPars.get('type','')=="alias":
uAlias:str = oTmpCodeSetAction.dActionPars['cmd']
aAliasCodeSet:List[cAction] = dTmpCodeSetActions[uAlias]
if len(aAliasCodeSet)==1:
oTmpCodeSetAction = copy(aAliasCodeSet[0])
oTmpCodeSetAction.uActionName= uAlias
else:
oTmpCodeSetAction.uActionString="call"
oTmpCodeSetAction.dActionPars["actionname"]=uAlias
oTmpCodeSetAction.iActionId=GetActionID(oTmpCodeSetAction.uActionString)
oTmpCodeSetAction.dActionPars["type"]=""
dTmpCodeSetActions[uKey][iPos]=oTmpCodeSetAction
bDoItAgain=True
except Exception as e:
uMsg:str = self.ShowError(uMsg=u'Cannot read Codeset (wrong alias [%s=%s] CodesetFileName: %s):'% (uKey,uAlias,oCodesetFileName.string),oException=e)
ShowErrorPopUp(uTitle='Error Reading Codeset',uMessage=uMsg)
# Make calls local & Read the common attributes
for uKey in dTmpCodeSetActions:
for oTmpCodeSetAction in dTmpCodeSetActions[uKey]:
if oTmpCodeSetAction.iActionId==Globals.oActions.oActionType.Call:
uActionName:str = oTmpCodeSetAction.dActionPars.get("actionname","")
if uActionName in dTmpCodeSetActions:
oTmpCodeSetAction.dActionPars["actionname"] = self.MakeLocalActionName(uActionName)
self.ReadAction(oTmpCodeSetAction)
# add them to the global action list
for uKey in dTmpCodeSetActions:
Globals.oActions.SetActionList(self.MakeLocalActionName(uKey),dTmpCodeSetActions[uKey])
except Exception as e:
uMsg:str = self.ShowError(uMsg=u'Cannot read Codeset :',oException=e)
ShowErrorPopUp(uTitle='Error Reading Codeset',uMessage=uMsg)
self.SetContextVar(uVarName="firstcall",uVarValue="1")
def ReadAction(self,oAction:cAction) -> None:
"""
Adds and defaults some common attributes to the action
:param cAction oAction: Reads an action from the action pars
"""
oAction.uType = oAction.dActionPars.get(u'type', u'send')
oAction.uCmd = oAction.dActionPars.get(u'cmd', u'No cmd action defined')
oAction.uLocalDestVar = oAction.dActionPars.get(u'ldestvar', u'RESULT_' + oAction.uActionName)
oAction.uGlobalDestVar = oAction.dActionPars.get(u'gdestvar', u'RESULT_' + oAction.uActionName)
oAction.uGetVar = oAction.dActionPars.get(u'getvar', u'')
oAction.bWaitForResponse = ToBool(oAction.dActionPars.get(u'waitforresponse', u'0'))
oAction.uParseResultOption = oAction.dActionPars.get(u'parseoption', self.aIniSettings.uParseResultOption)
oAction.uParseResultTokenizeString = oAction.dActionPars.get(u'parsetoken', self.aIniSettings.uParseResultTokenizeString)
oAction.uParseResultFlags = oAction.dActionPars.get(u'parseflags', self.aIniSettings.uParseResultFlags)
oAction.uResultEndString = oAction.dActionPars.get(u'parseendstring', self.aIniSettings.uResultEndString)
oAction.dActionPars['interface'] = self.oInterFace.uObjectName
oAction.dActionPars['configname'] = self.uConfigName
if oAction.dActionPars.get('varcontext','')=="codeset":
oAction.dActionPars["varcontext"]=self.uContext
def MakeLocalActionName(self,uActionName:str) -> str:
"""
Creates a (codeset) local version of an action
:param uActionName:
:return:
"""
return uActionName+" :"+self.uContext
def Connect(self) -> bool:
""" basic helper for managing connect """
if self.bOnError:
self.ShowDebug(uMsg=u'Interface Connect: Interface is on Error, setting interface to disconnected')
self.bIsConnected=False
if not self.aIniSettings.bDisableInterFaceOnError:
self.bOnError=False
if self.bIsConnected:
self.ShowDebug(uMsg=u'Interface Connect: Interface is connected, no connect required.')
return False
self.ReadStandardActions()
if self.bOnError:
return False
uOldHost:str = self.aIniSettings.uHost
if self.aIniSettings.get("bSaveDiscoveredIP") is not None and self.aIniSettings.get("uOldDiscoveredIP") is not None:
if self.aIniSettings.bSaveDiscoveredIP and self.aIniSettings.uOldDiscoveredIP != '' and self.aIniSettings.uHost== u'discover':
self.aIniSettings.uHost=self.aIniSettings.uOldDiscoveredIP
self.ShowDebug(uMsg="Reusing previous discovered IP:"+self.aIniSettings.uOldDiscoveredIP)
elif self.aIniSettings.uHost==u'discover':
bRet:bool = self.Discover()
if not bRet:
if self.aIniSettings.uOldDiscoveredIP!="":
self.aIniSettings.uHost=self.aIniSettings.uOldDiscoveredIP
if self.aIniSettings.uHost.startswith('linked:'):
self.aIniSettings.uHost=self.oInterFace.oObjectConfig.GetSettingParFromVar(self.aIniSettings.uHost)
self.ShowDebug(uMsg=u'Pulled crosslinked var: %s=%s' %(uOldHost,self.aIniSettings.uHost))
if self.aIniSettings.uHost=="discover":
return False
return True
def AddTrigger(self,uTrigger:str,uActionName:str,uRetVar:str,uGetVar:str) -> cBaseTrigger:
"""
Adds a trigger
:rtype: cBaseTrigger
:param string uTrigger: The name of the trigger
:param string uActionName: The Action the get triggered
:param string uRetVar: The return var
:param string uGetVar: The var to parse
:return: The trigger
"""
oTrigger:cBaseTrigger = cBaseTrigger()
oTrigger.uTriggerAction = uActionName
oTrigger.uRetVar = uRetVar
oTrigger.uGetVar = uGetVar
oTrigger.uTriggerName = uTrigger
oTrigger.uGlobalDestVar = uRetVar
oTrigger.uLocalDestVar = uRetVar
# If we link to a codesetcode setting
'''
if uGetVar.startswith(u'codesetcode:'):
uActionName = self.MakeLocalActionName(uGetVar[12:])
aActions = Globals.oActions.dActionsCommands.get(uActionName)
if aActions is not None:
oAction=aActions[0]
if oAction.uGetVar != u'':
oTrigger.uGetVar=oAction.uGetVar
if oAction.uGlobalDestVar != u'':
oTrigger.uRetVar = oAction.uGlobalDestVar
oTrigger.uTriggerName = oAction.uCmd
'''
self.dTriggers[uTrigger] = oTrigger
return self.AddTriggerNew(uTrigger,uActionName,uRetVar,uGetVar)
def AddTriggerNew(self,uTrigger:str,uActionName:str,uRetVar:str,uGetVar:str) -> cBaseTrigger:
"""
Adds a trigger
:rtype: cBaseTrigger
:param string uTrigger: The name of the trigger
:param string uActionName: The Action to call
:param string uRetVar: The return var
:param string uGetVar: The var to parse
:return: The trigger
"""
oTrigger:cBaseTrigger = cBaseTrigger()
oTrigger.uTriggerAction = uActionName
oTrigger.uRetVar = uRetVar
oTrigger.uGetVar = uGetVar
oTrigger.uTriggerName = uTrigger
oTrigger.uGlobalDestVar = uRetVar
oTrigger.uLocalDestVar = uRetVar
aCurrentTriggers:List[cBaseTrigger]=self.dNewTriggers.get(uTrigger,[])
aCurrentTriggers.append(oTrigger)
self.dNewTriggers[uTrigger]=aCurrentTriggers
return oTrigger
def DelTrigger(self,uTrigger:str,uActionName:str) -> None:
"""
deletes a trigger
:param string uTrigger: The Name of the trigger to delete
:param string uActionName: The Action which has been registered
"""
if uTrigger in self.dNewTriggers:
aCurrentTriggers:List[cBaseTrigger]=self.dNewTriggers.get(uTrigger,[])
aCurrentTriggers[:] = [oTrigger for oTrigger in aCurrentTriggers if oTrigger.uTriggerAction!=uActionName]
def GetTrigger(self,uTrigger:str) -> List[cBaseTrigger]:
"""
        We do not use the index, as uTrigger might not reflect the trigger name:
        it could be a trigger parsed from the result, e.g. defined by a codeset code
:param string uTrigger:
:return:
"""
aTriggers:List[cBaseTrigger]
aResult:List[cBaseTrigger] = []
oTrigger:cBaseTrigger
for uTriggerIdx in self.dNewTriggers:
aTriggers = self.dNewTriggers[uTriggerIdx]
for oTrigger in aTriggers:
if oTrigger.uTriggerName == uTrigger[:len(oTrigger.uTriggerName)]:
aResult.append(oTrigger)
return aResult
def CallTrigger(self,oTrigger:cBaseTrigger,uResponse:str) -> None:
"""
calls a trigger
:param cBaseTrigger oTrigger: The trigger object
:param str uResponse: The response of the trigger action
:return: None
"""
# if oTrigger.uTriggerAction=='':
# self.ShowWarning(u'No Trigger Action defined for Trigger:' + oTrigger.uTriggerName)
# # return
        self.ShowDebug(uMsg=oTrigger.uTriggerName + u':Trigger Action:' + oTrigger.uTriggerAction)
uCmd:str
vRetVal:Union[str,Tuple]
uRetVal:str
oAction:Union[None,cAction] = None
if oTrigger.uGetVar.startswith(u'codesetcode:'):
uActionName:str = self.MakeLocalActionName(oTrigger.uGetVar[12:])
aActions:List[cAction] = Globals.oActions.GetActionList(uActionName = uActionName, bNoCopy=False)
if aActions is not None:
oAction=aActions[0]
if oAction.uGetVar != u'':
oTrigger.uGetVar=oAction.uGetVar
if oAction.uGlobalDestVar != u'':
oTrigger.uRetVar = oAction.uGlobalDestVar
oTrigger.uTriggerName = oAction.uCmd
if oAction is None:
if self.oAction:
oAction = copy(self.oAction)
else:
oAction = copy(self.oLastAction)
oAction.uActionName = oTrigger.uTriggerAction
oAction.uGetVar = oTrigger.uGetVar
oAction.uGlobalDestVar = oTrigger.uRetVar
#We call ParseResult to set the Values to proper Global Vars
uCmd,vRetVal = self.oInterFace.ParseResult(oAction,uResponse,self)
if isinstance(vRetVal,tuple):
uRetVal = vRetVal[0]
else:
uRetVal = vRetVal
if oTrigger.uRetVar != u'' and uRetVal != u'':
SetVar(uVarName = oTrigger.uRetVar, oVarValue = uRetVal)
if oAction.uActionName != u'':
aActions=Globals.oEvents.CreateSimpleActionList(aActions=[{'string':'call','actionname':oTrigger.uTriggerAction,'name':oAction.uActionName}])
Globals.oEvents.ExecuteActionsNewQueue(aActions,Globals.oTheScreen.oCurrentPage.oWidgetBackGround)
def DeInit(self) -> None:
""" Deinits the interfaces """
Clock.unschedule(self.FktDisconnect)
self.Disconnect()
# noinspection PyUnusedLocal
def FktDisconnect(self,*largs) -> None:
""" Helper for scheduled (timed) disconnect """
self.Disconnect()
def Disconnect(self) -> bool:
""" Basic disconnect function """
self.ShowDebug(uMsg=u'Base Disconnect #1:Closing Connection')
if not self.bIsConnected:
return False
self.ShowDebug(uMsg=u'Base Disconnect #2:Closing Connection')
self.bIsConnected = False
if self.bOnError:
return False
self.ShowDebug(uMsg=u'Closing Connection')
Clock.unschedule(self.FktDisconnect)
return True
def GetTriggerOld(self,uTrigger:str) -> Union[cBaseTrigger,None]:
"""
        We do not use the index, as uTrigger might not reflect the trigger name:
        it could be a trigger parsed from the result, e.g. defined by a codeset code
:param string uTrigger:
:return:
"""
for uTriggerIdx in self.dTriggers:
oTrigger = self.dTriggers[uTriggerIdx]
if oTrigger.uTriggerName == uTrigger[:len(oTrigger.uTriggerName)]:
return oTrigger
return None
def DelTriggerOld(self,uTrigger:str) -> None:
"""
deletes a trigger
:param string uTrigger: The Name of the trigger to delete
"""
if uTrigger in self.dTriggers:
del self.dTriggers[uTrigger]
|
thica/ORCA-Remote
|
src/ORCA/interfaces/BaseInterfaceSettings.py
|
Python
|
gpl-3.0
| 24,195
|
[
"ORCA"
] |
3dc18bf9886be9b43b18dcc12e46ae32d02323a39172719e695dead2b70e87a0
|
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table
from astroML.plotting import hist  # enhanced histogram with bins='knuth' (used by qphist)
# robust standard deviation
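# (0.741 = 1/1.349: the interquartile range of a Gaussian is ~1.349 sigma)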
def sigG(arr):
return 0.741*(np.quantile(arr, 0.75)-np.quantile(arr, 0.25))
def printStats(arr):
print(' ', np.min(arr), np.mean(arr), np.median(arr), np.max(arr), np.size(arr))
return
def checkNobs(d, band):
str1 = band + '_Nobs_old'
arr1 = d[str1]
print(band, 'band, OLD:')
printStats(arr1)
str1 = band + '_Nobs_new'
arr1 = d[str1]
print(' NEW:')
printStats(arr1)
print(' DIFF:')
str2 = 'd' + band
arr2 = d[str2]
printStats(arr2)
return
def selectCatalog(sdssIn, sdssOut, aux='_new'):
print('starting with', len(sdssIn))
a1 = sdssIn['g_Nobs'+aux]
a2 = sdssIn['r_Nobs'+aux]
a3 = sdssIn['i_Nobs'+aux]
mOK = sdssIn[(a1>3)&(a2>3)&(a3>3)]
print('after Nobs cuts:', len(mOK))
# chi2<3 cut:
a1 = mOK['g_chi2'+aux]
a2 = mOK['r_chi2'+aux]
a3 = mOK['i_chi2'+aux]
mOK2 = mOK[(a1<3)&(a2<3)&(a3<3)]
print('after chi2 cuts:', len(mOK2))
# and the final standard error of the mean r band mag: <0.05 mag
sdssOut = mOK2[mOK2['r_mErr'+aux]<0.05]
print('after r_mErr cut:', len(sdssOut))
return sdssOut
def getSSCentry(df, i, aux='_new'):
entry = {}
ra = df['ra'+aux][i]
dec = df['dec'+aux][i]
raRMS = df['raRMS'+aux][i]
    decRMS = df['decRMS'+aux][i]
nEpochs = df['nEpochs'+aux][i]
AR_val = df['AR_val'+aux][i]
entry['coord'] = (ra, dec, raRMS, decRMS, nEpochs, AR_val)
for b in ('u','g','r','i','z'):
lst = []
for q in ('Nobs', 'mMed', 'mMean', 'mErr', 'rms_scatt', 'chi2'):
qb = b + '_' + q + aux
lst.append(df[qb][i])
entry[b] = lst
return entry
def SSCentryToOutFileRow(entry, SSCindex, OutFile):
OutFile.write('%s' % SSCindex)
format = '%12.6f %10.6f %.4f %.4f %3d %7.3f'
OutFile.write(format % (entry['coord']))
format = '%3d %.3f %.3f %.3f %.3f %.3f '
for i in ('u', 'g', 'r', 'i', 'z'):
sss = (entry[i][0], entry[i][1], entry[i][2], entry[i][3], entry[i][4], entry[i][5])
OutFile.write(format % sss)
OutFile.write('\n')
return
# given vectors x and y, fit medians in bins from xMin to xMax, with Nbin steps,
# and return xBin, medianBin, medianErrBin
def fitMedians(x, y, xMin, xMax, Nbin, verbose=1):
# first generate bins
xEdge = np.linspace(xMin, xMax, (Nbin+1))
xBin = np.linspace(0, 1, Nbin)
nPts = 0*np.linspace(0, 1, Nbin)
medianBin = 0*np.linspace(0, 1, Nbin)
sigGbin = -1+0*np.linspace(0, 1, Nbin)
for i in range(0, Nbin):
xBin[i] = 0.5*(xEdge[i]+xEdge[i+1])
yAux = y[(x>xEdge[i])&(x<=xEdge[i+1])]
if (yAux.size > 0):
nPts[i] = yAux.size
medianBin[i] = np.median(yAux)
# robust estimate of standard deviation: 0.741*(q75-q25)
sigmaG = 0.741*(np.percentile(yAux,75)-np.percentile(yAux,25))
# uncertainty of the median: sqrt(pi/2)*st.dev/sqrt(N)
sigGbin[i] = np.sqrt(np.pi/2)*sigmaG/np.sqrt(nPts[i])
else:
nPts[i] = 0
medianBin[i] = 0
sigGbin[i] = 0
if (verbose):
print('median:', np.median(medianBin[nPts>0]), 'std.dev:', np.std(medianBin[nPts>0]))
return xBin, nPts, medianBin, sigGbin
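# Example (sketch, hypothetical column names): median trend of dr vs. r mag,
# in 10 bins over 14 < r < 22:
#
#   xBin, nPts, medBin, sigBin = fitMedians(d['rmag'], d['dr'], 14, 22, 10)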
# this function computes polynomial models given some data x
# and parameters theta
def polynomial_fit(theta, x):
"""Polynomial model of degree (len(theta) - 1)"""
return sum(t * x ** n for (n, t) in enumerate(theta))
# a direct optimization approach is used to get best model
# parameters (which minimize -logL)
def best_theta(data, degree, model=polynomial_fit):
from scipy import optimize
theta_0 = (degree + 1) * [0]
neg_logL = lambda theta: -logL(data, theta, model)
return optimize.fmin_bfgs(neg_logL, theta_0, disp=False)
# compute the data log-likelihood given a model
def logL(data, theta, model=polynomial_fit):
from scipy import stats
"""Gaussian log-likelihood of the model at theta"""
x, y, sigma_y = data
y_fit = model(theta, x)
return sum(stats.norm.logpdf(*args)
for args in zip(y, y_fit, sigma_y))
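# Example (illustrative sketch): recover quadratic coefficients from noisy data.
#
#   x = np.linspace(0, 10, 50)
#   sigma_y = 0.5 * np.ones_like(x)
#   y = 1.0 + 2.0*x - 0.3*x**2 + np.random.normal(0, sigma_y)
#   theta2 = best_theta((x, y, sigma_y), degree=2)   # ~ [1.0, 2.0, -0.3]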
### PLOTS
# quick plot - binned median
def qpBM(d, Xstr, Xmin, Xmax, Ystr, Ymin, Ymax, nBin, Nsigma=3, offset=0.01):
print('medianAll:', np.median(d[Ystr]), 'std.dev.All:', sigG(d[Ystr]))
print('N=', np.size(d[Ystr]), 'min=', np.min(d[Ystr]), 'max=', np.max(d[Ystr]))
ax = plt.axes()
ax.scatter(d[Xstr], d[Ystr], s=0.01, c='black')
# binning
xBinM, nPtsM, medianBinM, sigGbinM = fitMedians(d[Xstr], d[Ystr], Xmin, Xmax, nBin, 1)
# plotting
ax.scatter(xBinM, medianBinM, s=30.0, c='black', alpha=0.8)
ax.scatter(xBinM, medianBinM, s=15.0, c='yellow', alpha=0.3)
#
TwoSigP = medianBinM + Nsigma*sigGbinM
TwoSigM = medianBinM - Nsigma*sigGbinM
ax.plot(xBinM, TwoSigP, c='yellow')
ax.plot(xBinM, TwoSigM, c='yellow')
#
rmsBin = np.sqrt(nPtsM) / np.sqrt(np.pi/2) * sigGbinM
rmsP = medianBinM + rmsBin
rmsM = medianBinM - rmsBin
ax.plot(xBinM, rmsP, c='cyan')
ax.plot(xBinM, rmsM, c='cyan')
#
xL = np.linspace(-100,100)
ax.plot(xL, 0*xL, c='red')
ax.plot(xL, 0*xL+offset, '--', c='red')
ax.plot(xL, 0*xL-offset, '--', c='red')
#
ax.set_xlabel(Xstr)
ax.set_ylabel(Ystr)
ax.set_xlim(Xmin, Xmax)
ax.set_ylim(Ymin, Ymax)
plt.show()
return
def qphist(arr, xMin, xMax, xLabel, verbose = False):
ax = plt.axes()
hist(arr, bins='knuth', ax=ax, histtype='stepfilled', ec='k', fc='#AAAAAA')
ax.set_xlabel(xLabel)
ax.set_ylabel('n')
ax.set_xlim(xMin, xMax)
plt.show()
if (verbose):
print('Min, max: ', np.min(arr),np.max(arr))
print('Mean, median: ', np.mean(arr),np.median(arr))
print('sigG, st.dev.: ', sigG(arr),np.std(arr))
return
# wrapper around plotdelMag below to use only two vectors instead of astropy Table
def plotdelMagArr(xVec, yVec, kw):
t = Table()
t[kw['Xstr']] = xVec
t[kw['Ystr']] = yVec
plotdelMag(t, kw)
return
def plotdelMag(d, kw):
print('medianAll:', np.median(d[kw['Ystr']]), 'std.dev.All:', sigG(d[kw['Ystr']]))
print('N=', np.size(d[kw['Ystr']]), 'min=', np.min(d[kw['Ystr']]), 'max=', np.max(d[kw['Ystr']]))
fig, ax = plt.subplots(figsize=(12, 8))
ax.scatter(d[kw['Xstr']], d[kw['Ystr']], s=kw['symbSize'], c='black')
# binning
xBinM, nPtsM, medianBinM, sigGbinM = fitMedians(d[kw['Xstr']], \
d[kw['Ystr']], kw['XminBin'], kw['XmaxBin'], kw['nBin'], 1)
# plotting
if (kw['offset'] >= 0):
xL = np.linspace(kw['XminBin'], kw['XmaxBin'])
ax.plot(xL, 0*xL, '-', c='red', linewidth=3)
ax.plot(xL, 0*xL+kw['offset'], '--', c='red', linewidth=3)
ax.plot(xL, 0*xL-kw['offset'], '--', c='red', linewidth=3)
if (0):
ax.scatter(xBinM, medianBinM, s=30.0, c='black', alpha=0.8)
ax.scatter(xBinM, medianBinM, s=15.0, c='yellow', alpha=0.3)
#
TwoSigP = medianBinM + kw['Nsigma']*sigGbinM
TwoSigM = medianBinM - kw['Nsigma']*sigGbinM
ax.plot(xBinM, TwoSigP, c='yellow', linewidth=3)
ax.plot(xBinM, TwoSigM, c='yellow', linewidth=3)
#
rmsBin = np.sqrt(nPtsM) / np.sqrt(np.pi/2) * sigGbinM
rmsP = medianBinM + rmsBin
rmsM = medianBinM - rmsBin
ax.plot(xBinM, rmsP, c='cyan', linewidth=3)
ax.plot(xBinM, rmsM, c='cyan', linewidth=3)
#
ax.set_xlabel(kw['Xlabel'], fontsize=22)
ax.set_ylabel(kw['Ylabel'], fontsize=22)
ax.set_xlim(kw['Xmin'], kw['Xmax'])
ax.set_ylim(kw['Ymin'], kw['Ymax'])
plt.xticks(fontsize=22)
plt.yticks(fontsize=22)
plt.savefig(kw['plotName'], dpi=600)
print('saved plot as:', kw['plotName'])
plt.show()
return
|
Karuntg/SDSS_SSC
|
Analysis_2020/ZItools.py
|
Python
|
gpl-3.0
| 7,967
|
[
"Gaussian"
] |
28b07fdf56b3ff6a9d4a17793040aedc96c5d07f065cee9e4c611915838250ee
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import math
import numpy
import os.path
from rmgpy.cantherm.common import checkConformerEnergy
import rmgpy.constants as constants
from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer
################################################################################
class GaussianLog:
"""
Represent a log file from Gaussian. The attribute `path` refers to the
location on disk of the Gaussian log file of interest. Methods are provided
to extract a variety of information into CanTherm classes and/or NumPy
arrays.
"""
def __init__(self, path):
self.path = path
def getNumberOfAtoms(self):
"""
Return the number of atoms in the molecular configuration used in
the Gaussian log file.
"""
Natoms = 0
# Open Gaussian log file for parsing
f = open(self.path, 'r')
line = f.readline()
while line != '' and Natoms == 0:
# Automatically determine the number of atoms
if 'Input orientation:' in line and Natoms == 0:
for i in range(5): line = f.readline()
while '---------------------------------------------------------------------' not in line:
Natoms += 1
line = f.readline()
line = f.readline()
# Close file when finished
f.close()
# Return the result
return Natoms
def loadForceConstantMatrix(self):
"""
Return the force constant matrix from the Gaussian log file. The job
that generated the log file must have the option ``iop(7/33=1)`` in
order for the proper force constant matrix (in Cartesian coordinates)
to be printed in the log file. If multiple such matrices are identified,
only the last is returned. The units of the returned force constants
are J/m^2. If no force constant matrix can be found in the log file,
``None`` is returned.
"""
F = None
Natoms = self.getNumberOfAtoms()
Nrows = Natoms * 3
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Read force constant matrix
if 'Force constants in Cartesian coordinates:' in line:
F = numpy.zeros((Nrows,Nrows), numpy.float64)
for i in range(int(math.ceil(Nrows / 5.0))):
# Header row
line = f.readline()
# Matrix element rows
for j in range(i*5, Nrows):
data = f.readline().split()
for k in range(len(data)-1):
F[j,i*5+k] = float(data[k+1].replace('D', 'E'))
F[i*5+k,j] = F[j,i*5+k]
# Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2
F *= 4.35974417e-18 / 5.291772108e-11**2
line = f.readline()
# Close file when finished
f.close()
return F
def loadGeometry(self):
"""
Return the optimum geometry of the molecular configuration from the
Gaussian log file. If multiple such geometries are identified, only the
last is returned.
"""
number = []; coord = []
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Automatically determine the number of atoms
if 'Input orientation:' in line:
number = []; coord = []
for i in range(5): line = f.readline()
while '---------------------------------------------------------------------' not in line:
data = line.split()
number.append(int(data[1]))
coord.append([float(data[3]), float(data[4]), float(data[5])])
line = f.readline()
line = f.readline()
# Close file when finished
f.close()
coord = numpy.array(coord, numpy.float64)
        number = numpy.array(number, int)
mass = numpy.zeros(len(number), numpy.float64)
# Use the atomic mass of the most common isotope rather than the
# average atomic mass
# These values were taken from "Atomic Weights and Isotopic Compositions" v3.0 (July 2010) from NIST
for i in range(len(number)):
if number[i] == 1:
mass[i] = 1.00782503207
elif number[i] == 6:
mass[i] = 12.0
elif number[i] == 7:
mass[i] = 14.0030740048
elif number[i] == 8:
mass[i] = 15.99491461956
elif number[i] == 15:
mass[i] = 30.97376163
elif number[i] == 16:
mass[i] = 31.97207100
elif number[i] == 17:
mass[i] = 35.4527
else:
raise NotImplementedError('Atomic number {0:d} not yet supported in loadGeometry().'.format(number[i]))
return coord, number, mass
def loadConformer(self, symmetry=None, spinMultiplicity=None, opticalIsomers=1):
"""
Load the molecular degree of freedom data from a log file created as
the result of a Gaussian "Freq" quantum chemistry calculation. As
Gaussian's guess of the external symmetry number is not always correct,
you can use the `symmetry` parameter to substitute your own value; if
not provided, the value in the Gaussian log file will be adopted. In a
log file with multiple Thermochemistry sections, only the last one will
be kept.
"""
modes = []
E0 = 0.0
f = open(self.path, 'r')
line = f.readline()
while line != '':
# The data we want is in the Thermochemistry section of the output
if '- Thermochemistry -' in line:
modes = []
inPartitionFunctions = False
line = f.readline()
while line != '':
# This marks the end of the thermochemistry section
if '-------------------------------------------------------------------' in line:
break
# Read molecular mass for external translational modes
elif 'Molecular mass:' in line:
mass = float(line.split()[2])
translation = IdealGasTranslation(mass=(mass,"amu"))
modes.append(translation)
# Read Gaussian's estimate of the external symmetry number
elif 'Rotational symmetry number' in line and symmetry is None:
symmetry = int(float(line.split()[3]))
# Read moments of inertia for external rotational modes
elif 'Rotational constants (GHZ):' in line:
inertia = [float(d) for d in line.split()[-3:]]
for i in range(3):
inertia[i] = constants.h / (8 * constants.pi * constants.pi * inertia[i] * 1e9) *constants.Na*1e23
rotation = NonlinearRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
modes.append(rotation)
elif 'Rotational constant (GHZ):' in line:
inertia = [float(line.split()[3])]
inertia[0] = constants.h / (8 * constants.pi * constants.pi * inertia[0] * 1e9) *constants.Na*1e23
rotation = LinearRotor(inertia=(inertia[0],"amu*angstrom^2"), symmetry=symmetry)
modes.append(rotation)
# Read vibrational modes
elif 'Vibrational temperatures:' in line:
frequencies = []
frequencies.extend([float(d) for d in line.split()[2:]])
line = f.readline()
frequencies.extend([float(d) for d in line.split()[1:]])
line = f.readline()
while line.strip() != '':
frequencies.extend([float(d) for d in line.split()])
line = f.readline()
# Convert from K to cm^-1
if len(frequencies) > 0:
frequencies = [freq * 0.695039 for freq in frequencies] # kB = 0.695039 cm^-1/K
vibration = HarmonicOscillator(frequencies=(frequencies,"cm^-1"))
modes.append(vibration)
# Read ground-state energy
elif 'Sum of electronic and zero-point Energies=' in line:
E0 = float(line.split()[6]) * 4.35974394e-18 * constants.Na
# Read spin multiplicity if not explicitly given
elif 'Electronic' in line and inPartitionFunctions and spinMultiplicity is None:
spinMultiplicity = int(float(line.split()[1].replace('D', 'E')))
elif 'Log10(Q)' in line:
inPartitionFunctions = True
# Read the next line in the file
line = f.readline()
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
return Conformer(E0=(E0*0.001,"kJ/mol"), modes=modes, spinMultiplicity=spinMultiplicity, opticalIsomers=opticalIsomers)
def loadEnergy(self,frequencyScaleFactor=1.):
"""
Load the energy in J/mol from a Gaussian log file. The file is checked
for a complete basis set extrapolation; if found, that value is
returned. Only the last energy in the file is returned. The zero-point
energy is *not* included in the returned value; it is removed from the
CBS-QB3 value.
"""
E0 = None; E0_cbs = None; scaledZPE = None
f = open(self.path, 'r')
line = f.readline()
while line != '':
if 'SCF Done:' in line:
E0 = float(line.split()[4]) * constants.E_h * constants.Na
elif 'CBS-QB3 (0 K)' in line:
E0_cbs = float(line.split()[3]) * constants.E_h * constants.Na
elif 'G3(0 K)' in line:
E0_cbs = float(line.split()[2]) * constants.E_h * constants.Na
# Read the ZPE from the "E(ZPE)=" line, as this is the scaled version.
# Gaussian defines the following as
# E (0 K) = Elec + E(ZPE),
# The ZPE is the scaled ZPE given by E(ZPE) in the log file,
# hence to get the correct Elec from E (0 K) we need to subtract the scaled ZPE
elif 'E(ZPE)' in line:
scaledZPE = float(line.split()[1]) * constants.E_h * constants.Na
elif '\\ZeroPoint=' in line:
line = line.strip() + f.readline().strip()
start = line.find('\\ZeroPoint=') + 11
end = line.find('\\', start)
scaledZPE = float(line[start:end]) * constants.E_h * constants.Na * frequencyScaleFactor
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
if E0_cbs is not None:
if scaledZPE is None:
raise Exception('Unable to find zero-point energy in Gaussian log file.')
return E0_cbs - scaledZPE
elif E0 is not None:
return E0
else: raise Exception('Unable to find energy in Gaussian log file.')
def loadZeroPointEnergy(self):
"""
Load the unscaled zero-point energy in J/mol from a Gaussian log file.
"""
ZPE = None
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Do NOT read the ZPE from the "E(ZPE)=" line, as this is the scaled version!
# We will read in the unscaled ZPE and later multiply the scaling factor
# from the input file
if 'Zero-point correction=' in line:
ZPE = float(line.split()[2]) * constants.E_h * constants.Na
elif '\\ZeroPoint=' in line:
line = line.strip() + f.readline().strip()
start = line.find('\\ZeroPoint=') + 11
end = line.find('\\', start)
ZPE = float(line[start:end]) * constants.E_h * constants.Na
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
if ZPE is not None:
return ZPE
else:
raise Exception('Unable to find zero-point energy in Gaussian log file.')
def loadScanEnergies(self):
"""
Extract the optimized energies in J/mol from a log file, e.g. the
result of a Gaussian "Scan" quantum chemistry calculation.
"""
optfreq = False
rigidScan=False
# The array of potentials at each scan angle
Vlist = []
# Parse the Gaussian log file, extracting the energies of each
# optimized conformer in the scan
f = open(self.path, 'r')
line = f.readline()
while line != '':
# If the job contains a "freq" then we want to ignore the last energy
if ' freq ' in line:
optfreq = True
            # If '# scan' is the keyword instead of '# opt', then this is a
            # rigid scan job and the energies are parsed a little differently
if '# scan' in line:
rigidScan=True
# The lines containing "SCF Done" give the energy at each
# iteration (even the intermediate ones)
if 'SCF Done:' in line:
E = float(line.split()[4])
                # Rigid scans do not optimize at each step, so just append every energy found.
if rigidScan:
Vlist.append(E)
# We want to keep the values of E that come most recently before
# the line containing "Optimization completed", since it refers
# to the optimized geometry
if 'Optimization completed' in line:
Vlist.append(E)
line = f.readline()
# Close file when finished
f.close()
#give warning in case this assumption is not true
if rigidScan==True:
print ' Assuming', os.path.basename(self.path), 'is the output from a rigid scan...'
Vlist = numpy.array(Vlist, numpy.float64)
        # Check to see if the scan log indicates that one of the reacting species may not be the lowest energy conformer
checkConformerEnergy(Vlist, self.path)
# Adjust energies to be relative to minimum energy conformer
# Also convert units from Hartree/particle to kJ/mol
Vlist -= numpy.min(Vlist)
Vlist *= constants.E_h * constants.Na
if optfreq: Vlist = Vlist[:-1]
# Determine the set of dihedral angles corresponding to the loaded energies
# This assumes that you start at 0.0, finish at 360.0, and take
# constant step sizes in between
angle = numpy.arange(0.0, 2*math.pi+0.00001, 2*math.pi/(len(Vlist)-1), numpy.float64)
return Vlist, angle
def loadNegativeFrequency(self):
"""
Return the negative frequency from a transition state frequency
calculation in cm^-1.
"""
frequencies = []
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Read vibrational frequencies
if 'Frequencies --' in line:
frequencies.extend(line.split()[2:])
line = f.readline()
# Close file when finished
f.close()
frequencies = [float(freq) for freq in frequencies]
frequencies.sort()
frequency = [freq for freq in frequencies if freq < 0][0]
return frequency
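# --- Hedged usage sketch (added for illustration, not part of the original
# module; the log file name below is hypothetical) ---
#
#     log = GaussianLog('ethane_freq.log')
#     conformer = log.loadConformer(symmetry=6, spinMultiplicity=1)
#     E0 = log.loadEnergy(frequencyScaleFactor=0.99)   # electronic energy, J/mol
#     Vlist, angle = log.loadScanEnergies()            # for hindered-rotor scans
#     nu = log.loadNegativeFrequency()                 # cm^-1, transition states only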
|
Molecular-Image-Recognition/Molecular-Image-Recognition
|
code/rmgpy/cantherm/gaussian.py
|
Python
|
mit
| 17,734
|
[
"Gaussian"
] |
d0fc72995666c284cb23738d3726dc321d5b4c6e08bd8bf00267e8159d8b9b24
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2016 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy as np
from horton.cext import compute_nucnuc
from horton.context import context
from horton.gbasis.gobasis import get_gobasis
from horton.grid.molgrid import BeckeMolGrid
from horton.io.iodata import IOData
from horton.log import log
from horton.matrix.dense import DenseLinalgFactory
from horton.meanfield.builtin import RDiracExchange, UDiracExchange
from horton.meanfield.convergence import convergence_error_eigen
from horton.meanfield.gridgroup import RGridGroup, UGridGroup
from horton.meanfield.guess import guess_core_hamiltonian
from horton.meanfield.hamiltonian import REffHam, UEffHam
from horton.meanfield.libxc import RLibXCLDA, ULibXCLDA, RLibXCGGA, ULibXCGGA
from horton.meanfield.observable import RTwoIndexTerm, RDirectTerm, RExchangeTerm
from horton.meanfield.observable import UTwoIndexTerm, UDirectTerm, UExchangeTerm
from horton.meanfield.occ import AufbauOccModel, FixedOccModel
from horton.meanfield.scf_oda import check_cubic
__all__ = [
'check_cubic_wrapper', 'check_interpolation', 'check_solve', 'helper_compute',
'check_hf_cs_hf', 'check_lih_os_hf', 'check_water_cs_hfs',
'check_n2_cs_hfs', 'check_h3_os_hfs', 'check_h3_os_pbe', 'check_co_cs_pbe',
'check_vanadium_sc_hf',
]
def check_cubic_wrapper(ham, dm0s, dm1s, do_plot=False):
focks = [dm0.new() for dm0 in dm0s]
# evaluate stuff at dm0
ham.reset(*dm0s)
e0 = ham.compute_energy()
ham.compute_fock(*focks)
g0 = 0.0
for i in xrange(ham.ndm):
g0 += focks[i].contract_two('ab,ba', dm1s[i])
g0 -= focks[i].contract_two('ab,ba', dm0s[i])
g0 *= ham.deriv_scale
# evaluate stuff at dm1
ham.reset(*dm1s)
e1 = ham.compute_energy()
ham.compute_fock(*focks)
g1 = 0.0
for i in xrange(ham.ndm):
g1 += focks[i].contract_two('ab,ba', dm1s[i])
g1 -= focks[i].contract_two('ab,ba', dm0s[i])
g1 *= ham.deriv_scale
check_cubic(ham, dm0s, dm1s, e0, e1, g0, g1, do_plot)
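# Note added for clarity (not in the original module): g0 and g1 above are
# the directional derivatives of the energy along the straight line from
# dm0s to dm1s, i.e. dE/dx at x=0 and x=1, computed as Tr[F (dm1 - dm0)]
# scaled by ham.deriv_scale; check_cubic then compares the cubic fixed by
# these two endpoint energies and slopes against the actual energy along
# the segment.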
def check_interpolation(ham, lf, olp, kin, na, exps, do_plot=False):
dm0s = [exp.to_dm() for exp in exps]
guess_core_hamiltonian(olp, kin, na, *exps)
dm1s = [exp.to_dm() for exp in exps]
check_cubic_wrapper(ham, dm0s, dm1s, do_plot)
def check_solve(ham, scf_solver, occ_model, lf, olp, kin, na, *exps):
guess_core_hamiltonian(olp, kin, na, *exps)
if scf_solver.kind == 'exp':
occ_model.assign(*exps)
assert scf_solver.error(ham, lf, olp, *exps) > scf_solver.threshold
scf_solver(ham, lf, olp, occ_model, *exps)
assert scf_solver.error(ham, lf, olp, *exps) < scf_solver.threshold
else:
occ_model.assign(*exps)
dms = [exp.to_dm() for exp in exps]
assert scf_solver.error(ham, lf, olp, *dms) > scf_solver.threshold
scf_solver(ham, lf, olp, occ_model, *dms)
assert scf_solver.error(ham, lf, olp, *dms) < scf_solver.threshold
focks = [lf.create_two_index() for i in xrange(ham.ndm)]
ham.compute_fock(*focks)
for i in xrange(ham.ndm):
exps[i].from_fock(focks[i], olp)
occ_model.assign(*exps)
def helper_compute(ham, lf, *exps):
# Test energy before scf
dms = [exp.to_dm() for exp in exps]
ham.reset(*dms)
ham.compute_energy()
focks = [lf.create_two_index() for exp in exps]
ham.compute_fock(*focks)
return ham.cache['energy'], focks
@log.with_level(log.high)
def check_hf_cs_hf(scf_solver):
fn_fchk = context.get_fn('test/hf_sto3g.fchk')
mol = IOData.from_file(fn_fchk)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
occ_model = AufbauOccModel(5)
check_solve(ham, scf_solver, occ_model, mol.lf, olp, kin, na, mol.exp_alpha)
# test orbital energies
expected_energies = np.array([
-2.59083334E+01, -1.44689996E+00, -5.57467136E-01, -4.62288194E-01,
-4.62288194E-01, 5.39578910E-01,
])
assert abs(mol.exp_alpha.energies - expected_energies).max() < 1e-5
ham.compute_energy()
# compare with g09
assert abs(ham.cache['energy'] - -9.856961609951867E+01) < 1e-8
assert abs(ham.cache['energy_kin'] - 9.766140786239E+01) < 2e-7
assert abs(ham.cache['energy_hartree'] + ham.cache['energy_x_hf'] - 4.561984106482E+01) < 1e-6
assert abs(ham.cache['energy_ne'] - -2.465756615329E+02) < 1e-6
assert abs(ham.cache['energy_nn'] - 4.7247965053) < 1e-8
@log.with_level(log.high)
def check_lih_os_hf(scf_solver):
fn_fchk = context.get_fn('test/li_h_3-21G_hf_g09.fchk')
mol = IOData.from_file(fn_fchk)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er, 'hartree'),
UExchangeTerm(er, 'x_hf'),
UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms, external)
occ_model = AufbauOccModel(2, 1)
check_solve(ham, scf_solver, occ_model, mol.lf, olp, kin, na, mol.exp_alpha, mol.exp_beta)
expected_alpha_energies = np.array([
-2.76116635E+00, -7.24564188E-01, -1.79148636E-01, -1.28235698E-01,
-1.28235698E-01, -7.59817520E-02, -1.13855167E-02, 6.52484445E-03,
6.52484445E-03, 7.52201895E-03, 9.70893294E-01,
])
expected_beta_energies = np.array([
-2.76031162E+00, -2.08814026E-01, -1.53071066E-01, -1.25264964E-01,
-1.25264964E-01, -1.24605870E-02, 5.12761388E-03, 7.70499854E-03,
7.70499854E-03, 2.85176080E-02, 1.13197479E+00,
])
assert abs(mol.exp_alpha.energies - expected_alpha_energies).max() < 1e-5
assert abs(mol.exp_beta.energies - expected_beta_energies).max() < 1e-5
ham.compute_energy()
# compare with g09
assert abs(ham.cache['energy'] - -7.687331212191962E+00) < 1e-8
assert abs(ham.cache['energy_kin'] - 7.640603924034E+00) < 2e-7
assert abs(ham.cache['energy_hartree'] + ham.cache['energy_x_hf'] - 2.114420907894E+00) < 1e-7
assert abs(ham.cache['energy_ne'] - -1.811548789281E+01) < 2e-7
assert abs(ham.cache['energy_nn'] - 0.6731318487) < 1e-8
@log.with_level(log.high)
def check_water_cs_hfs(scf_solver):
fn_fchk = context.get_fn('test/water_hfs_321g.fchk')
mol = IOData.from_file(fn_fchk)
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, random_rotate=False)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RGridGroup(mol.obasis, grid, [
RDiracExchange(),
]),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
# The convergence should be reasonable, not perfect because of limited
# precision in Gaussian fchk file and different integration grids:
assert convergence_error_eigen(ham, mol.lf, olp, mol.exp_alpha) < 3e-5
# Recompute the orbitals and orbital energies. This should be reasonably OK.
dm_alpha = mol.exp_alpha.to_dm()
ham.reset(dm_alpha)
ham.compute_energy()
fock_alpha = mol.lf.create_two_index()
ham.compute_fock(fock_alpha)
mol.exp_alpha.from_fock(fock_alpha, olp)
expected_energies = np.array([
-1.83691041E+01, -8.29412411E-01, -4.04495188E-01, -1.91740814E-01,
-1.32190590E-01, 1.16030419E-01, 2.08119657E-01, 9.69825207E-01,
9.99248500E-01, 1.41697384E+00, 1.47918828E+00, 1.61926596E+00,
2.71995350E+00
])
assert abs(mol.exp_alpha.energies - expected_energies).max() < 2e-4
assert abs(ham.cache['energy_ne'] - -1.977921986200E+02) < 1e-7
assert abs(ham.cache['energy_kin'] - 7.525067610865E+01) < 1e-9
assert abs(ham.cache['energy_hartree'] + ham.cache['energy_x_dirac'] - 3.864299848058E+01) < 2e-4
assert abs(ham.cache['energy'] - -7.474134898935590E+01) < 2e-4
assert abs(ham.cache['energy_nn'] - 9.1571750414) < 2e-8
# Converge from scratch and check energies
occ_model = AufbauOccModel(5)
check_solve(ham, scf_solver, occ_model, mol.lf, olp, kin, na, mol.exp_alpha)
ham.compute_energy()
assert abs(ham.cache['energy_ne'] - -1.977921986200E+02) < 1e-4
assert abs(ham.cache['energy_kin'] - 7.525067610865E+01) < 1e-4
assert abs(ham.cache['energy_hartree'] + ham.cache['energy_x_dirac'] - 3.864299848058E+01) < 2e-4
assert abs(ham.cache['energy'] - -7.474134898935590E+01) < 2e-4
@log.with_level(log.high)
def check_n2_cs_hfs(scf_solver):
fn_fchk = context.get_fn('test/n2_hfs_sto3g.fchk')
mol = IOData.from_file(fn_fchk)
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, 'veryfine', random_rotate=False)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
libxc_term = RLibXCLDA('x')
terms1 = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RGridGroup(mol.obasis, grid, [libxc_term]),
RTwoIndexTerm(na, 'ne'),
]
ham1 = REffHam(terms1, external)
builtin_term = RDiracExchange()
terms2 = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RGridGroup(mol.obasis, grid, [builtin_term]),
RTwoIndexTerm(na, 'ne'),
]
ham2 = REffHam(terms2, external)
# Compare the potential computed by libxc with the builtin implementation
energy1, focks1 = helper_compute(ham1, mol.lf, mol.exp_alpha)
energy2, focks2 = helper_compute(ham2, mol.lf, mol.exp_alpha)
libxc_pot = ham1.cache.load('pot_libxc_lda_x_alpha')
builtin_pot = ham2.cache.load('pot_x_dirac_alpha')
# Libxc apparently approximates values of the potential below 1e-4 with zero.
assert abs(libxc_pot - builtin_pot).max() < 1e-4
    # Check that the libxc energy matches our implementation
assert abs(energy1 - energy2) < 1e-10
ex1 = ham1.cache['energy_libxc_lda_x']
ex2 = ham2.cache['energy_x_dirac']
assert abs(ex1 - ex2) < 1e-10
# The convergence should be reasonable, not perfect because of limited
# precision in Gaussian fchk file:
assert convergence_error_eigen(ham1, mol.lf, olp, mol.exp_alpha) < 1e-5
assert convergence_error_eigen(ham2, mol.lf, olp, mol.exp_alpha) < 1e-5
occ_model = AufbauOccModel(7)
for ham in ham1, ham2:
# Converge from scratch
check_solve(ham, scf_solver, occ_model, mol.lf, olp, kin, na, mol.exp_alpha)
# test orbital energies
expected_energies = np.array([
-1.37107053E+01, -1.37098006E+01, -9.60673085E-01, -3.57928483E-01,
-3.16017655E-01, -3.16017655E-01, -2.12998316E-01, 6.84030479E-02,
6.84030479E-02, 7.50192517E-01,
])
assert abs(mol.exp_alpha.energies - expected_energies).max() < 3e-5
ham.compute_energy()
assert abs(ham.cache['energy_ne'] - -2.981579553570E+02) < 1e-5
assert abs(ham.cache['energy_kin'] - 1.061620887711E+02) < 1e-5
assert abs(ham.cache['energy'] - -106.205213597) < 1e-4
assert abs(ham.cache['energy_nn'] - 23.3180604505) < 1e-8
assert abs(ham1.cache['energy_hartree'] + ham1.cache['energy_libxc_lda_x'] - 6.247259253877E+01) < 1e-4
assert abs(ham2.cache['energy_hartree'] + ham2.cache['energy_x_dirac'] - 6.247259253877E+01) < 1e-4
@log.with_level(log.high)
def check_h3_os_hfs(scf_solver):
fn_fchk = context.get_fn('test/h3_hfs_321g.fchk')
mol = IOData.from_file(fn_fchk)
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, 'veryfine', random_rotate=False)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
libxc_term = ULibXCLDA('x')
terms1 = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er, 'hartree'),
UGridGroup(mol.obasis, grid, [libxc_term]),
UTwoIndexTerm(na, 'ne'),
]
ham1 = UEffHam(terms1, external)
builtin_term = UDiracExchange()
terms2 = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er, 'hartree'),
UGridGroup(mol.obasis, grid, [builtin_term]),
UTwoIndexTerm(na, 'ne'),
]
ham2 = UEffHam(terms2, external)
# Compare the potential computed by libxc with the builtin implementation
energy1, focks1 = helper_compute(ham1, mol.lf, mol.exp_alpha, mol.exp_beta)
energy2, focks2 = helper_compute(ham2, mol.lf, mol.exp_alpha, mol.exp_beta)
libxc_pot = ham1.cache.load('pot_libxc_lda_x_both')[:,0]
builtin_pot = ham2.cache.load('pot_x_dirac_alpha')
# Libxc apparently approximates values of the potential below 1e-4 with zero.
assert abs(libxc_pot - builtin_pot).max() < 1e-4
    # Check that the libxc energy matches our implementation
assert abs(energy1 - energy2) < 1e-10
ex1 = ham1.cache['energy_libxc_lda_x']
ex2 = ham2.cache['energy_x_dirac']
assert abs(ex1 - ex2) < 1e-10
# The convergence should be reasonable, not perfect because of limited
# precision in Gaussian fchk file:
assert convergence_error_eigen(ham1, mol.lf, olp, mol.exp_alpha, mol.exp_beta) < 1e-5
assert convergence_error_eigen(ham2, mol.lf, olp, mol.exp_alpha, mol.exp_beta) < 1e-5
occ_model = AufbauOccModel(2, 1)
for ham in ham1, ham2:
# Converge from scratch
check_solve(ham, scf_solver, occ_model, mol.lf, olp, kin, na, mol.exp_alpha, mol.exp_beta)
# test orbital energies
expected_energies = np.array([
-4.93959157E-01, -1.13961330E-01, 2.38730924E-01, 7.44216538E-01,
8.30143356E-01, 1.46613581E+00
])
assert abs(mol.exp_alpha.energies - expected_energies).max() < 1e-5
expected_energies = np.array([
-4.34824166E-01, 1.84114514E-04, 3.24300545E-01, 7.87622756E-01,
9.42415831E-01, 1.55175481E+00
])
assert abs(mol.exp_beta.energies - expected_energies).max() < 1e-5
ham.compute_energy()
# compare with g09
assert abs(ham.cache['energy_ne'] - -6.832069993374E+00) < 1e-5
assert abs(ham.cache['energy_kin'] - 1.870784279014E+00) < 1e-5
assert abs(ham.cache['energy'] - -1.412556114057104E+00) < 1e-5
assert abs(ham.cache['energy_nn'] - 1.8899186021) < 1e-8
assert abs(ham1.cache['energy_hartree'] + ham1.cache['energy_libxc_lda_x'] - 1.658810998195E+00) < 1e-6
assert abs(ham2.cache['energy_hartree'] + ham2.cache['energy_x_dirac'] - 1.658810998195E+00) < 1e-6
@log.with_level(log.high)
def check_co_cs_pbe(scf_solver):
fn_fchk = context.get_fn('test/co_pbe_sto3g.fchk')
mol = IOData.from_file(fn_fchk)
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, 'fine', random_rotate=False)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RGridGroup(mol.obasis, grid, [
RLibXCGGA('x_pbe'),
RLibXCGGA('c_pbe'),
]),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms, external)
# Test energy before scf
energy, focks = helper_compute(ham, mol.lf, mol.exp_alpha)
assert abs(energy - -1.116465967841901E+02) < 1e-4
# The convergence should be reasonable, not perfect because of limited
# precision in Gaussian fchk file:
assert convergence_error_eigen(ham, mol.lf, olp, mol.exp_alpha) < 1e-5
# Converge from scratch
occ_model = AufbauOccModel(7)
check_solve(ham, scf_solver, occ_model, mol.lf, olp, kin, na, mol.exp_alpha)
# test orbital energies
expected_energies = np.array([
-1.86831122E+01, -9.73586915E+00, -1.03946082E+00, -4.09331776E-01,
-3.48686522E-01, -3.48686522E-01, -2.06049056E-01, 5.23730418E-02,
5.23730418E-02, 6.61093726E-01
])
assert abs(mol.exp_alpha.energies - expected_energies).max() < 1e-2
ham.compute_energy()
# compare with g09
assert abs(ham.cache['energy_ne'] - -3.072370116827E+02) < 1e-2
assert abs(ham.cache['energy_kin'] - 1.103410779827E+02) < 1e-2
assert abs(ham.cache['energy_hartree'] + ham.cache['energy_libxc_gga_x_pbe'] + ham.cache['energy_libxc_gga_c_pbe'] - 6.273115782683E+01) < 1e-2
assert abs(ham.cache['energy'] - -1.116465967841901E+02) < 1e-4
assert abs(ham.cache['energy_nn'] - 22.5181790889) < 1e-7
@log.with_level(log.high)
def check_h3_os_pbe(scf_solver):
fn_fchk = context.get_fn('test/h3_pbe_321g.fchk')
mol = IOData.from_file(fn_fchk)
grid = BeckeMolGrid(mol.coordinates, mol.numbers, mol.pseudo_numbers, 'veryfine', random_rotate=False)
olp = mol.obasis.compute_overlap(mol.lf)
kin = mol.obasis.compute_kinetic(mol.lf)
na = mol.obasis.compute_nuclear_attraction(mol.coordinates, mol.pseudo_numbers, mol.lf)
er = mol.obasis.compute_electron_repulsion(mol.lf)
external = {'nn': compute_nucnuc(mol.coordinates, mol.pseudo_numbers)}
terms = [
UTwoIndexTerm(kin, 'kin'),
UDirectTerm(er, 'hartree'),
UGridGroup(mol.obasis, grid, [
ULibXCGGA('x_pbe'),
ULibXCGGA('c_pbe'),
]),
UTwoIndexTerm(na, 'ne'),
]
ham = UEffHam(terms, external)
# compute the energy before converging
dm_alpha = mol.exp_alpha.to_dm()
dm_beta = mol.exp_beta.to_dm()
ham.reset(dm_alpha, dm_beta)
ham.compute_energy()
assert abs(ham.cache['energy'] - -1.593208400939354E+00) < 1e-5
# The convergence should be reasonable, not perfect because of limited
# precision in Gaussian fchk file:
assert convergence_error_eigen(ham, mol.lf, olp, mol.exp_alpha, mol.exp_beta) < 2e-6
# Converge from scratch
occ_model = AufbauOccModel(2, 1)
check_solve(ham, scf_solver, occ_model, mol.lf, olp, kin, na, mol.exp_alpha, mol.exp_beta)
# test orbital energies
expected_energies = np.array([
-5.41141676E-01, -1.56826691E-01, 2.13089637E-01, 7.13565167E-01,
7.86810564E-01, 1.40663544E+00
])
assert abs(mol.exp_alpha.energies - expected_energies).max() < 2e-5
expected_energies = np.array([
-4.96730336E-01, -5.81411249E-02, 2.73586652E-01, 7.41987185E-01,
8.76161160E-01, 1.47488421E+00
])
assert abs(mol.exp_beta.energies - expected_energies).max() < 2e-5
ham.compute_energy()
# compare with g09
assert abs(ham.cache['energy_ne'] - -6.934705182067E+00) < 1e-5
assert abs(ham.cache['energy_kin'] - 1.948808793424E+00) < 1e-5
assert abs(ham.cache['energy_hartree'] + ham.cache['energy_libxc_gga_x_pbe'] + ham.cache['energy_libxc_gga_c_pbe'] - 1.502769385597E+00) < 1e-5
assert abs(ham.cache['energy'] - -1.593208400939354E+00) < 1e-5
assert abs(ham.cache['energy_nn'] - 1.8899186021) < 1e-8
@log.with_level(log.high)
def check_vanadium_sc_hf(scf_solver):
"""Try to converge the SCF for the neutral vanadium atom with fixe fractional occupations.
Parameters
----------
scf_solver : one of the SCFSolver types in HORTON
A configured SCF solver that must be tested.
"""
    # vanadium atom
numbers = np.array([23])
pseudo_numbers = numbers.astype(float)
coordinates = np.zeros((1, 3), float)
# Simple basis set
obasis = get_gobasis(coordinates, numbers, 'def2-tzvpd')
# Dense matrices
lf = DenseLinalgFactory(obasis.nbasis)
# Compute integrals
olp = obasis.compute_overlap(lf)
kin = obasis.compute_kinetic(lf)
na = obasis.compute_nuclear_attraction(coordinates, pseudo_numbers, lf)
er = obasis.compute_electron_repulsion(lf)
# Setup of restricted HF Hamiltonian
terms = [
RTwoIndexTerm(kin, 'kin'),
RDirectTerm(er, 'hartree'),
RExchangeTerm(er, 'x_hf'),
RTwoIndexTerm(na, 'ne'),
]
ham = REffHam(terms)
# Define fractional occupations of interest. (Spin-compensated case)
occ_model = FixedOccModel(np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 0.5]))
# Allocate orbitals and make the initial guess
exp_alpha = lf.create_expansion(obasis.nbasis)
guess_core_hamiltonian(olp, kin, na, exp_alpha)
# SCF test
check_solve(ham, scf_solver, occ_model, lf, olp, kin, na, exp_alpha)
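# --- Hedged usage sketch (added for illustration, not part of the original
# module) ---
# These helpers are meant to be driven from test modules with a configured
# SCF solver. Assuming HORTON's plain SCF solver (the class name and its
# threshold argument are assumptions here, not taken from this file):
#
#     from horton.meanfield.scf import PlainSCFSolver
#     check_hf_cs_hf(PlainSCFSolver(1e-7))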
|
crisely09/horton
|
horton/meanfield/test/common.py
|
Python
|
gpl-3.0
| 22,472
|
[
"Gaussian"
] |
acf2363ce1a100cb032b591b40fed56fd247b346e24dbdf26c016d1f2a8190c9
|
# -*- coding: UTF-8 -*-
from setuptools import setup, find_packages
import versioneer
setup(
name='bioconda-utils',
author="Johannes Köster, Ryan Dale, The Bioconda Team",
description="Utilities for building and managing conda packages",
license="MIT",
packages=find_packages(exclude=['test']),
include_package_data=True,
data_files=[
(
'bioconda_utils',
[
'bioconda_utils/bioconda_utils-requirements.txt',
'bioconda_utils/config.schema.yaml',
],
)
],
entry_points={"console_scripts": [
"bioconda-utils = bioconda_utils.cli:main"
]},
classifiers=[
"Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3"
],
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
)
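# Hedged note (added for illustration, not part of the original file): during
# development a setup.py like this is typically exercised with an editable
# install, e.g. "pip install -e .", after which the "bioconda-utils" console
# script declared in entry_points above becomes available on PATH.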
|
bioconda/bioconda-utils
|
setup.py
|
Python
|
mit
| 1,092
|
[
"Bioconda"
] |
d9d140bb378e1f6fad5c7687037ba4d08066742e80a64d8c2fa735c3dced2391
|
''' file name : convexhull.py
Description : This sample shows how to find the convex hull of contours
This is the Python version of this tutorial : http://opencv.itseez.com/doc/tutorials/imgproc/shapedescriptors/hull/hull.html
Level : Beginner
Benefits : Learn to use cv2.convexHull()
Usage : python convexhull.py
Written by : Abid K. (abidrahman2@gmail.com) , Visit opencvpython.blogspot.com for more tutorials'''
import cv2
import numpy as np
def thresh_callback(thresh):
edges = cv2.Canny(blur,thresh,thresh*2)
drawing = np.zeros(img.shape,np.uint8) # Image to draw the contours
contours,hierarchy = cv2.findContours(edges,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
hull = cv2.convexHull(cnt)
cv2.drawContours(drawing,[cnt],0,(0,255,0),2) # draw contours in green color
        cv2.drawContours(drawing,[hull],0,(0,0,255),2)   # draw convex hull in red color
cv2.imshow('output',drawing)
cv2.imshow('input',img)
img = cv2.imread('messi5.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
cv2.namedWindow('input')
thresh = 100
max_thresh = 255
cv2.createTrackbar('canny thresh:','input',thresh,max_thresh,thresh_callback)
thresh_callback(0)
if cv2.waitKey(0) == 27:
cv2.destroyAllWindows()
### For more details & feature extraction on contours, visit : http://opencvpython.blogspot.com/2012/04/contour-features.html
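### Hedged extension sketch (added for illustration, not part of the original
### sample): OpenCV can also report where a contour deviates from its hull
### via convexity defects, e.g.
###     hull_idx = cv2.convexHull(cnt, returnPoints=False)
###     defects = cv2.convexityDefects(cnt, hull_idx)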
|
asrob-uc3m/rpc_rpi
|
src/python/opencv_python_tutorials/Official_Tutorial_Python_Codes/3_imgproc/convexhull.py
|
Python
|
gpl-3.0
| 1,437
|
[
"VisIt"
] |
434eccac9809bf78cb624380b3203e0c82779f9b557cc108baedec0239c61d66
|
from __future__ import absolute_import, unicode_literals
from ..serialization import (
TembaObject, SimpleField, BooleanField, IntegerField, DatetimeField, ObjectListField, ObjectField
)
class Broadcast(TembaObject):
id = IntegerField()
urns = SimpleField()
contacts = SimpleField()
groups = SimpleField()
text = SimpleField()
status = SimpleField()
created_on = DatetimeField()
class Campaign(TembaObject):
uuid = SimpleField()
name = SimpleField()
group = SimpleField(src='group_uuid')
created_on = DatetimeField()
class Contact(TembaObject):
uuid = SimpleField()
name = SimpleField()
urns = SimpleField()
groups = SimpleField(src='group_uuids')
fields = SimpleField()
language = SimpleField()
blocked = SimpleField()
failed = SimpleField()
modified_on = DatetimeField()
class Group(TembaObject):
uuid = SimpleField()
name = SimpleField()
size = IntegerField()
class Event(TembaObject):
uuid = SimpleField()
campaign = SimpleField(src='campaign_uuid')
relative_to = SimpleField()
offset = IntegerField()
unit = SimpleField()
delivery_hour = IntegerField()
message = SimpleField()
flow = SimpleField(src='flow_uuid')
created_on = DatetimeField()
class Field(TembaObject):
key = SimpleField()
label = SimpleField()
value_type = SimpleField()
class RuleSet(TembaObject):
uuid = SimpleField(src='node')
label = SimpleField()
response_type = SimpleField()
class Flow(TembaObject):
uuid = SimpleField()
name = SimpleField()
archived = SimpleField()
labels = SimpleField()
runs = IntegerField()
completed_runs = IntegerField()
expires = IntegerField()
rulesets = ObjectListField(item_class=RuleSet)
created_on = DatetimeField()
class FlowDefinition(TembaObject):
metadata = SimpleField()
version = IntegerField()
base_language = SimpleField()
flow_type = SimpleField()
action_sets = SimpleField()
rule_sets = SimpleField()
entry = SimpleField()
class Label(TembaObject):
uuid = SimpleField()
name = SimpleField()
count = IntegerField()
class Message(TembaObject):
id = IntegerField()
broadcast = IntegerField(optional=True)
contact = SimpleField()
urn = SimpleField()
status = SimpleField()
type = SimpleField()
labels = SimpleField()
direction = SimpleField()
archived = SimpleField()
text = SimpleField()
created_on = DatetimeField()
delivered_on = DatetimeField()
sent_on = DatetimeField()
class Org(TembaObject):
name = SimpleField()
country = SimpleField()
languages = SimpleField()
primary_language = SimpleField()
timezone = SimpleField()
date_style = SimpleField()
anon = SimpleField()
class RunValueSet(TembaObject):
node = SimpleField()
category = SimpleField()
text = SimpleField()
rule_value = SimpleField()
value = SimpleField()
label = SimpleField()
time = DatetimeField()
class FlowStep(TembaObject):
node = SimpleField()
text = SimpleField()
value = SimpleField()
type = SimpleField()
arrived_on = DatetimeField()
left_on = DatetimeField()
class Run(TembaObject):
id = IntegerField(src='run')
flow = SimpleField(src='flow_uuid')
contact = SimpleField()
steps = ObjectListField(item_class=FlowStep)
values = ObjectListField(item_class=RunValueSet)
created_on = DatetimeField()
modified_on = DatetimeField()
expires_on = DatetimeField()
expired_on = DatetimeField()
completed = BooleanField()
@classmethod
def deserialize(cls, item):
run = super(Run, cls).deserialize(item)
# Temba API should only be returning values for the last visit to each step but returns all instead
last_only = []
nodes_seen = set()
for valueset in reversed(run.values):
if valueset.node not in nodes_seen:
last_only.append(valueset)
nodes_seen.add(valueset.node)
last_only.reverse()
run.values = last_only
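        # Worked example of the filtering above (comment added for clarity,
        # not in the original module): for valuesets visiting nodes
        # [A, B, A], the reversed pass keeps the last A and then B, and the
        # final reverse() restores chronological order, yielding [B, A].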
return run
class Geometry(TembaObject):
type = SimpleField()
coordinates = SimpleField()
class Boundary(TembaObject):
boundary = SimpleField()
name = SimpleField()
level = IntegerField()
parent = SimpleField()
geometry = ObjectField(item_class=Geometry)
class CategoryStats(TembaObject):
count = IntegerField()
label = SimpleField()
class Result(TembaObject):
boundary = SimpleField(optional=True)
set = IntegerField()
unset = IntegerField()
open_ended = SimpleField()
label = SimpleField()
categories = ObjectListField(item_class=CategoryStats)
|
system7-open-source/rapidpro-python
|
temba_client/v1/types.py
|
Python
|
bsd-3-clause
| 4,759
|
[
"VisIt"
] |
6f70dd151f89543cab40cc610475f0d782b44444f19d5d0a07b4e2501c269f92
|
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import ElasticNet
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.multioutput import MultiOutputRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn import metrics
import numpy as np
def get_gaussian_process_regressor():
gp = GaussianProcessRegressor()
return [gp],['Gaussian Process']
def get_mlp_regressor(num_hidden_units=51):
mlp = MLPRegressor(hidden_layer_sizes=num_hidden_units)
return [mlp],['Multi-Layer Perceptron']
def get_ensemble_models():
rf = RandomForestRegressor(n_estimators=51,min_samples_leaf=5,min_samples_split=3,random_state=42)
bag = BaggingRegressor(n_estimators=51,random_state=42)
extra = ExtraTreesRegressor(n_estimators=71,random_state=42)
ada = AdaBoostRegressor(random_state=42)
grad = GradientBoostingRegressor(n_estimators=101,random_state=42)
classifier_list = [rf,bag,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list, classifier_name_list
def get_linear_model():
elastic_net = ElasticNet()
return [elastic_net],['Elastic Net']
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name ,' ---------\n'
predicted_values = trained_model.predict(X_test)
print "Mean Absolute Error : ", metrics.mean_absolute_error(y_test,predicted_values)
print "Median Absolute Error : ", metrics.median_absolute_error(y_test,predicted_values)
print "Mean Squared Error : ", metrics.mean_squared_error(y_test,predicted_values)
print "R2 Score : ", metrics.r2_score(y_test,predicted_values)
print "---------------------------------------\n"
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
        # Bug fix: the original line read "if type(dataframe[column][0]) is np.nan:",
        # which is always False because np.nan is a float value, not a type.
        # A null check preserves the apparent intent: scan ahead when the
        # first entry of a column is missing.
        if pd.isnull(dataframe[column][0]):
for i in range(len(dataframe)):
if i > 50000:
break
if type(dataframe[column][i]) is unicode or type(dataframe[column][i]) is bool or type(dataframe[column][i]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
break
elif type(dataframe[column][0]) is unicode or type(dataframe[column][0]) is bool or type(dataframe[column][0]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
filename = 'world_cup_data.csv'
cup_frame = pd.read_csv(filename)
columns_to_delete = ['ELO Rating','FIFA Rating','Position']
target_values = cup_frame['FIFA Rating'].values
cup_frame.drop(columns_to_delete,axis=1,inplace=True)
cup_frame = label_encode_frame(cup_frame)
X_train,X_test,y_train,y_test = train_test_split(cup_frame.values,target_values,test_size=0.2,random_state=42)
regressor_list,regressor_name_list = get_ensemble_models()
for regressor, regressor_name in zip(regressor_list,regressor_name_list):
regressor.fit(X_train,y_train)
print_evaluation_metrics(regressor,regressor_name,X_test,y_test)
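# --- Hedged usage sketch (added for illustration, not part of the original
# script) ---
# The Gaussian-process, MLP and elastic-net getters defined above are never
# invoked; they can be dropped into the same evaluation loop, e.g.:
#
#     regressor_list, regressor_name_list = get_gaussian_process_regressor()
#     for regressor, regressor_name in zip(regressor_list, regressor_name_list):
#         regressor.fit(X_train, y_train)
#         print_evaluation_metrics(regressor, regressor_name, X_test, y_test)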
|
rupakc/Kaggle-Compendium
|
World Cup 2010 - Take on Quants/cup-baseline.py
|
Python
|
mit
| 3,608
|
[
"Gaussian"
] |
45ae67ef9da024312bb1cdaab7a22def2fe52064e23bfccb76ba737ba76ef43f
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Module: Profile
===============
"""
from __future__ import print_function
from .basic_observables import Number
from .intrinsic_distance import IntrinsicDistance
import numpy as np
from scipy import stats
from MDAnalysis.core.groups import Atom, AtomGroup, Residue, ResidueGroup
class Profile(object):
r"""Calculates the profile (normal, or intrinsic) of a given observable
across the simulation box.
:param Observable observable: :class:`Number <pytim.observables.Number>`,
:class:`Mass <pytim.observables.Mass>`, or
any other observable:
calculate the profile of this quantity. If
None is supplied, it defaults to the number
density. The number density is always
calculated on a per atom basis.
:param ITIM interface: if provided, calculate the intrinsic
profile with respect to the first layers
    :param str direction:      'x','y', or 'z' : calculate the profile
                               along this direction (default: 'z', or
                               the normal direction of the interface,
                               if provided).
    :param bool MCnorm:        if True (default) use a simple Monte Carlo
                               estimate of the effective volumes of the bins.
:Keyword Arguments:
* MCpoints (int) --
number of points used for MC normalization (default, 10x the number
of atoms in the universe)
Example (non-intrinsic, total profile + first 4 layers ):
>>> import numpy as np
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import *
>>> from pytim.observables import Profile
>>>
>>> u = mda.Universe(WATER_GRO,WATER_XTC)
>>> g = u.select_atoms('name OW')
>>> # here we calculate the profiles of oxygens only (note molecular=False)
>>> inter = pytim.ITIM(u,group=g,max_layers=4,centered=True, molecular=False)
>>>
>>> # We create a list of 5 profiles, one for the total and 4 for the first
>>> # 4 layers.
>>> # Note that by default Profile() uses the number of atoms as an observable
>>> Layers = []
>>> for n in range(5):
... Layers.append(Profile())
>>>
>>> # Go through the trajectory, center the liquid slab and sample the profiles
>>> for ts in u.trajectory[::50]:
... # this shifts the system so that the center of mass of the liquid slab
... # is in the middle of the box
... inter.center()
...
... Layers[0].sample(g)
... Layers[1].sample(u.atoms[u.atoms.layers == 1 ])
... Layers[2].sample(u.atoms[u.atoms.layers == 2 ])
... Layers[3].sample(u.atoms[u.atoms.layers == 3 ])
... Layers[4].sample(u.atoms[u.atoms.layers == 4 ])
>>>
>>> density=[]
>>> for L in Layers:
... low,up,avg = L.get_values(binwidth=0.5)
... density.append(avg)
>>>
>>> # (low + up )/2 is the middle of the bin
>>> np.savetxt('profile.dat',list(zip(low,up,density[0],density[1],density[2],density[3],density[4])))
This results in the following profile (sampling more often and zooming close to the interface border)
.. image:: nonintrinsic_water.png
:width: 50%
Example: the intrinsic profile of a LJ liquid/vapour interface:
>>> import numpy as np
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import LJ_GRO, LJ_SHORT_XTC
>>> from pytim.observables import Profile
>>> u = mda.Universe(LJ_GRO,LJ_SHORT_XTC)
>>>
>>> inter = pytim.ITIM(u,alpha=2.5,cluster_cut=4.5)
>>> profile = Profile(interface=inter)
>>>
>>> for ts in u.trajectory:
... profile.sample(u.atoms)
>>>
>>> low, up, avg = profile.get_values(binwidth=0.5)
>>> np.savetxt('profile.dat',list(zip(low,up,avg)))
This results in the following profile (sampling a longer trajectory):
.. image:: intrinsic_lj.png
:width: 50%
    Note the missing point at position = 0; this is the delta-function contribution.
Negative positions are within the liquid phase, while positive ones are in the vapour
phase.
"""
def __init__(self,
direction=None,
observable=None,
interface=None,
symmetry='default',
mode='default',
MCnorm=True,
**kargs):
_dir = {'x': 0, 'y': 1, 'z': 2}
if direction is None:
try:
self._dir = interface.normal
except:
self._dir = 2
else:
self._dir = _dir[direction]
self.mode = mode
self.interface = interface
self._MCnorm = MCnorm
self.kargs = kargs
if symmetry == 'default' and interface is not None:
self.symmetry = self.interface.symmetry
else:
self.symmetry = symmetry
if observable is None:
self.observable = Number()
else:
self.observable = observable
self.binsize = 0.01 # this is used for internal calculations, the
# output binsize can be specified in
# self.get_values()
self.sampled_bins = None
self.sampled_values = None
self._range = None
self._counts = 0
self._totvol = []
def _determine_range(self, box):
upper = np.min(box)
if self._MCnorm:
upper = np.max(box)
r = np.array([0., upper])
if self._dir is not None:
r = np.array([0., box[self._dir]])
if self.interface is not None:
r -= r[1] / 2.
self._range = r
def _determine_bins(self):
nbins = int((self._range[1] - self._range[0]) / self.binsize)
        # we need to make sure that the number of bins is even, so that zero
        # falls on a bin edge (this makes the delta-function contribution
        # always land in the bin that starts at zero)
if (nbins % 2 > 0):
nbins += 1
self._nbins = nbins
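        # Worked example (comment added for clarity, not in the original
        # module): with range (-10, 10) and binsize 0.01 the initial count is
        # 2000 and stays even; an odd count such as 1999 would be bumped to
        # 2000, so zero is a bin edge and the intrinsic delta-function
        # contribution is always collected in the bin immediately above it.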
def _sample_random_distribution(self, group):
box = group.universe.dimensions[:3]
rnd_accum = np.array(0)
try:
size = self.kargs['MCpoints']
        except KeyError:
            # assume atomic volumes of ~ 30 A^3 and sample
            # 10 points per atomic volume as a rule of thumb
size1 = int(np.prod(box) / 3.)
# just in case 'unphysical' densities are used:
size2 = 10 * len(group.universe.atoms)
size = np.max([size1, size2])
rnd = np.random.random((size, 3))
rnd *= self.interface.universe.dimensions[:3]
rnd_pos = IntrinsicDistance(
self.interface, symmetry=self.symmetry).compute(rnd)
rnd_accum, bins, _ = stats.binned_statistic(
rnd_pos,
np.ones(len(rnd_pos)),
range=self._range,
statistic='sum',
bins=self._nbins)
return rnd_accum, bins
def sample(self, group, **kargs):
# TODO: implement progressive averaging to handle very long trajs
# TODO: implement memory cleanup
if not isinstance(group, AtomGroup):
raise TypeError("The first argument passed to "
"Profile.sample() must be an AtomGroup.")
box = group.universe.trajectory.ts.dimensions[:3]
if self._range is None:
self._determine_range(box)
self._determine_bins()
v = np.prod(box)
self._totvol.append(v)
if self.interface is None:
pos = group.positions[::, self._dir]
else:
pos = IntrinsicDistance(
self.interface, symmetry=self.symmetry, mode=self.mode).compute(group)
if self._MCnorm is False:
rnd_accum = np.ones(self._nbins)
else:
rnd_accum, bins = self._sample_random_distribution(group)
values = self.observable.compute(group, **kargs)
accum, bins, _ = stats.binned_statistic(
pos,
values,
range=tuple(self._range),
statistic='sum',
bins=self._nbins)
accum[~np.isfinite(accum)] = 0.0
if self.sampled_values is None:
self.sampled_values = accum.copy()
if self.interface is not None:
self.sampled_rnd_values = rnd_accum.copy()
# stores the midpoints
self.sampled_bins = bins[1:] - self.binsize / 2.
else:
self.sampled_values += accum
if self.interface is not None:
self.sampled_rnd_values += rnd_accum
self._counts += 1
def get_values(self, binwidth=None, nbins=None):
if self.sampled_values is None:
print("Warning no profile sampled so far")
# we use the largest box (largest number of bins) as reference.
        # Statistics will be poor at the boundaries, but this way we don't
        # lose information
max_bins = len(self.sampled_bins)
max_size = max_bins * self.binsize
if binwidth is not None: # overrides nbins
nbins = max_size / binwidth
if nbins is None: # means also binwidth must be none
nbins = max_bins
if (nbins % 2 > 0):
nbins += 1
vals = self.sampled_values.copy()
vals /= (np.average(self._totvol) / self._nbins)
vals /= self._counts
if self.interface is not None:
# new versions of scipy.binned_statistic don't like inf
# we set it now to zero, but only here, so that the
# count is always available in self.sampled_values
deltabin = int(1 + (nbins - 1) // 2)
vals[deltabin] = 0
avg, bins, _ = stats.binned_statistic(
self.sampled_bins,
vals,
range=self._range,
statistic='mean',
bins=nbins)
if self.interface is not None:
_vol = self.sampled_rnd_values * self._nbins
_vol /= np.sum(self.sampled_rnd_values)
avgV, binsV, _ = stats.binned_statistic(
self.sampled_bins,
_vol,
range=self._range,
statistic='mean',
bins=nbins)
avg[deltabin] = np.inf
avg[avgV > 0.0] /= avgV[avgV > 0.0]
avg[avgV <= 0.0] = 0.0
return [bins[0:-1], bins[1:], avg]
@staticmethod
def _():
"""
>>> # this doctest checks that the same profile is
>>> # obtained after rotating the system, and that
>>> # it is consistent through versions
>>> import MDAnalysis as mda
>>> import numpy as np
>>> import pytim
>>> pytim.observables.Profile._()
>>> from pytim.datafiles import WATERSMALL_GRO
>>> from matplotlib import pyplot as plt
>>> u = mda.Universe(WATERSMALL_GRO)
>>> inter = pytim.ITIM(u,cluster_cut=3.5,alpha=2.5)
>>> print(inter.normal)
2
>>> np.set_printoptions(precision=8)
>>> np.random.seed(1) # for the MC normalization
>>> stdprof = pytim.observables.Profile()
>>> stdprof.sample(u.atoms)
>>> print(stdprof.get_values(binwidth=0.5)[2][:6])
[0.09229169 0.10959639 0.08075523 0.10959639 0.09805993 0.09805993]
>>> prof = pytim.observables.Profile(interface=inter)
>>> prof.sample(u.atoms)
>>> vals = prof.get_values(binwidth=0.5)[2]
>>> print(vals[len(vals)//2-3:len(vals)//2+3])
[0.07344066 0.04300743 0.02803522 inf 0. 0. ]
>>> sv = prof.sampled_values
>>> u.atoms.positions=np.roll(u.atoms.positions,1,axis=1)
>>> box = u.dimensions[:]
>>> box[0]=box[2]
>>> box[2]=box[1]
>>> u.dimensions = box
>>> inter = pytim.ITIM(u,cluster_cut=3.5,alpha=2.5)
>>> print(inter.normal)
0
>>> prof = pytim.observables.Profile(interface=inter)
>>> prof.sample(u.atoms)
>>> sv2 = prof.sampled_values
>>> print(np.all(sv==sv2))
True
>>> # We check now the profile computed with GITIM
>>> u = mda.Universe(WATERSMALL_GRO)
>>> g = u.select_atoms('name OW')
>>> inter = pytim.GITIM(u,group=g,alpha=2.5)
>>> print(inter.normal)
None
>>> np.random.seed(1) # for the MC normalization
>>> stdprof = pytim.observables.Profile()
>>> stdprof.sample(u.atoms)
>>> print(stdprof.get_values(binwidth=0.5)[2][:6])
[0.09229169 0.10959639 0.08075523 0.10959639 0.09805993 0.09805993]
>>> prof = pytim.observables.Profile(interface=inter)
>>> prof.sample(u.atoms)
>>> vals = prof.get_values(binwidth=1.0)[2]
>>> print(vals[len(vals)//2-4:len(vals)//2+2])
[0.09554818 0.09796541 0.05555127 0. inf 0. ]
"""
pass
|
Marcello-Sega/pytim
|
pytim/observables/profile.py
|
Python
|
gpl-3.0
| 13,430
|
[
"MDAnalysis"
] |
a34461562ee0aab8fd25ab7bb12d5760c5bb09a3a2e3f005d464fc1ba0178b31
|
"""
Spatial diagnostics module
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Daniel Arribas-Bel darribas@asu.edu"
from utils import spdot
from scipy.stats.stats import chisqprob
from scipy.stats import norm
import numpy as np
import numpy.linalg as la
__all__ = ['LMtests', 'MoranRes', 'AKtest']
class LMtests:
"""
Lagrange Multiplier tests. Implemented as presented in Anselin et al.
(1996) [1]_
...
    Parameters
----------
ols : OLS
OLS regression object
w : W
Spatial weights instance
tests : list
Lists of strings with the tests desired to be performed.
Values may be:
* 'all': runs all the options (default)
* 'lme': LM error test
* 'rlme': Robust LM error test
* 'lml' : LM lag test
* 'rlml': Robust LM lag test
    Attributes
----------
lme : tuple
(Only if 'lme' or 'all' was in tests). Pair of statistic and
p-value for the LM error test.
lml : tuple
(Only if 'lml' or 'all' was in tests). Pair of statistic and
p-value for the LM lag test.
rlme : tuple
(Only if 'rlme' or 'all' was in tests). Pair of statistic
and p-value for the Robust LM error test.
rlml : tuple
(Only if 'rlml' or 'all' was in tests). Pair of statistic
and p-value for the Robust LM lag test.
sarma : tuple
(Only if 'rlml' or 'all' was in tests). Pair of statistic
and p-value for the SARMA test.
References
----------
.. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
diagnostic tests for spatial dependence". Regional Science and Urban
Economics, 26, 77-104.
Examples
--------
>>> import numpy as np
>>> import pysal
>>> from ols import OLS
Open the csv file to access the data for analysis
>>> csv = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Pull out from the csv the files we need ('HOVAL' as dependent as well as
'INC' and 'CRIME' as independent) and directly transform them into nx1 and
nx2 arrays, respectively
>>> y = np.array([csv.by_col('HOVAL')]).T
>>> x = np.array([csv.by_col('INC'), csv.by_col('CRIME')]).T
Create the weights object from existing .gal file
>>> w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
Row-standardize the weight object (not required although desirable in some
cases)
>>> w.transform='r'
Run an OLS regression
>>> ols = OLS(y, x)
Run all the LM tests in the residuals. These diagnostics test for the
presence of remaining spatial autocorrelation in the residuals of an OLS
    model and give an indication of the type of spatial model. There are five
    tests: for a spatial lag model (simple and robust versions), for a
    spatial error model (simple and robust versions), and for the joint
    presence of both a spatial lag and a spatial error model.
>>> lms = pysal.spreg.diagnostics_sp.LMtests(ols, w)
LM error test:
>>> print round(lms.lme[0],4), round(lms.lme[1],4)
3.0971 0.0784
LM lag test:
>>> print round(lms.lml[0],4), round(lms.lml[1],4)
0.9816 0.3218
Robust LM error test:
>>> print round(lms.rlme[0],4), round(lms.rlme[1],4)
3.2092 0.0732
Robust LM lag test:
>>> print round(lms.rlml[0],4), round(lms.rlml[1],4)
1.0936 0.2957
LM SARMA test:
>>> print round(lms.sarma[0],4), round(lms.sarma[1],4)
4.1907 0.123
"""
def __init__(self, ols, w, tests=['all']):
cache = spDcache(ols, w)
if tests == ['all']:
tests = ['lme', 'lml', 'rlme', 'rlml', 'sarma']
if 'lme' in tests:
self.lme = lmErr(ols, w, cache)
if 'lml' in tests:
self.lml = lmLag(ols, w, cache)
if 'rlme' in tests:
self.rlme = rlmErr(ols, w, cache)
if 'rlml' in tests:
self.rlml = rlmLag(ols, w, cache)
if 'sarma' in tests:
self.sarma = lmSarma(ols, w, cache)
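    # Hedged usage note (comment added, not in the original module): passing
    # e.g. tests=['lme', 'rlme'] computes only the error-side statistics,
    # which avoids the extra work of the lag and SARMA diagnostics when they
    # are not needed.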
class MoranRes:
"""
Moran's I for spatial autocorrelation in residuals from OLS regression
...
Parameters
----------
ols : OLS
OLS regression object
w : W
Spatial weights instance
z : boolean
If set to True computes attributes eI, vI and zI. Due to computational burden of vI, defaults to False.
Attributes
----------
I : float
Moran's I statistic
eI : float
Moran's I expectation
vI : float
Moran's I variance
zI : float
Moran's I standardized value
Examples
--------
>>> import numpy as np
>>> import pysal
>>> from ols import OLS
Open the csv file to access the data for analysis
>>> csv = pysal.open(pysal.examples.get_path('columbus.dbf'),'r')
Pull out from the csv the files we need ('HOVAL' as dependent as well as
'INC' and 'CRIME' as independent) and directly transform them into nx1 and
nx2 arrays, respectively
>>> y = np.array([csv.by_col('HOVAL')]).T
>>> x = np.array([csv.by_col('INC'), csv.by_col('CRIME')]).T
Create the weights object from existing .gal file
>>> w = pysal.open(pysal.examples.get_path('columbus.gal'), 'r').read()
Row-standardize the weight object (not required although desirable in some
cases)
>>> w.transform='r'
Run an OLS regression
>>> ols = OLS(y, x)
Run Moran's I test for residual spatial autocorrelation in an OLS model.
This computes the traditional statistic applying a correction in the
expectation and variance to account for the fact it comes from residuals
instead of an independent variable
>>> m = pysal.spreg.diagnostics_sp.MoranRes(ols, w, z=True)
Value of the Moran's I statistic:
>>> print round(m.I,4)
0.1713
Value of the Moran's I expectation:
>>> print round(m.eI,4)
-0.0345
Value of the Moran's I variance:
>>> print round(m.vI,4)
0.0081
Value of the Moran's I standardized value. This is
distributed as a standard Normal(0, 1)
>>> print round(m.zI,4)
2.2827
P-value of the standardized Moran's I value (z):
>>> print round(m.p_norm,4)
0.0224
"""
def __init__(self, ols, w, z=False):
cache = spDcache(ols, w)
self.I = get_mI(ols, w, cache)
if z:
self.eI = get_eI(ols, w, cache)
self.vI = get_vI(ols, w, self.eI, cache)
self.zI, self.p_norm = get_zI(self.I, self.eI, self.vI)
class AKtest:
"""
Moran's I test of spatial autocorrelation for IV estimation.
Implemented following the original reference Anselin and Kelejian
(1997) [AK97]_
...
Parameters
----------
iv : TSLS
Regression object from TSLS class
w : W
Spatial weights instance
case : string
Flag for special cases (default to 'nosp'):
* 'nosp': Only NO spatial end. reg.
* 'gen': General case (spatial lag + end. reg.)
Attributes
----------
mi : float
Moran's I statistic for IV residuals
ak : float
Square of corrected Moran's I for residuals::
.. math::
ak = \dfrac{N \times I^*}{\phi^2}
Note: if case='nosp' then it simplifies to the LMerror
p : float
P-value of the test
References
----------
.. [AK97] Anselin, L., Kelejian, H. (1997) "Testing for spatial error
autocorrelation in the presence of endogenous regressors".
Interregional Regional Science Review, 20, 1.
.. [2] Kelejian, H.H., Prucha, I.R. and Yuzefovich, Y. (2004)
"Instrumental variable estimation of a spatial autorgressive model with
autoregressive disturbances: large and small sample results".
Advances in Econometrics, 18, 163-198.
Examples
--------
    We first need to import the needed modules. NumPy is needed to convert the
    data we read into arrays that ``spreg`` understands, and ``pysal`` to
    perform all the analysis. TSLS is required to run the model on
    which we will perform the tests.
>>> import numpy as np
>>> import pysal
>>> from twosls import TSLS
>>> from twosls_sp import GM_Lag
Open data on Columbus neighborhood crime (49 areas) using pysal.open().
This is the DBF associated with the Columbus shapefile. Note that
pysal.open() also reads data in CSV format; since the actual class
requires data to be passed in as numpy arrays, the user can read their
data in using any method.
>>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r')
Before being able to apply the diagnostics, we have to run a model and,
for that, we need the input variables. Extract the CRIME column (crime
rates) from the DBF file and make it the dependent variable for the
    regression. Note that PySAL requires this to be a numpy array of shape
(n, 1) as opposed to the also common shape of (n, ) that other packages
accept.
>>> y = np.array(db.by_col("CRIME"))
>>> y = np.reshape(y, (49,1))
Extract INC (income) vector from the DBF to be used as
independent variables in the regression. Note that PySAL requires this to
be an nxj numpy array, where j is the number of independent variables (not
including a constant). By default this model adds a vector of ones to the
independent variables passed in, but this can be overridden by passing
constant=False.
>>> X = []
>>> X.append(db.by_col("INC"))
>>> X = np.array(X).T
In this case, we consider HOVAL (home value) as an endogenous regressor,
so we acknowledge that by reading it in a different category.
>>> yd = []
>>> yd.append(db.by_col("HOVAL"))
>>> yd = np.array(yd).T
In order to properly account for the endogeneity, we have to pass in the
    instruments. Let us assume DISCBD (distance to the CBD) is a good one:
>>> q = []
>>> q.append(db.by_col("DISCBD"))
>>> q = np.array(q).T
Now we are good to run the model. It is an easy one line task.
>>> reg = TSLS(y, X, yd, q=q)
Now we are concerned with whether our non-spatial model presents spatial
autocorrelation in the residuals. To assess this possibility, we can run
the Anselin-Kelejian test, which is a version of the classical LM error
test adapted for the case of residuals from an instrumental variables (IV)
regression. First we need an extra object, the weights matrix, which
    incorporates the spatial configuration of the observations
into the error component of the model. To do that, we can open an already
existing gal file or create a new one. In this case, we will create one
from ``columbus.shp``.
>>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
Unless there is a good reason not to do it, the weights have to be
row-standardized so every row of the matrix sums to one. Among other
    things, this allows us to interpret the spatial lag of a variable as the
average value of the neighboring observations. In PySAL, this can be
easily performed in the following way:
>>> w.transform = 'r'
We are good to run the test. It is a very simple task:
>>> ak = AKtest(reg, w)
And explore the information obtained:
>>> print('AK test: %f\tP-value: %f'%(ak.ak, ak.p))
AK test: 4.642895 P-value: 0.031182
    The test also accommodates the case when the residuals come from an IV
regression that includes a spatial lag of the dependent variable. The only
requirement needed is to modify the ``case`` parameter when we call
``AKtest``. First, let us run a spatial lag model:
>>> reg_lag = GM_Lag(y, X, yd, q=q, w=w)
And now we can run the AK test and obtain similar information as in the
non-spatial model.
>>> ak_sp = AKtest(reg, w, case='gen')
>>> print('AK test: %f\tP-value: %f'%(ak_sp.ak, ak_sp.p))
AK test: 1.157593 P-value: 0.281965
"""
def __init__(self, iv, w, case='nosp'):
if case == 'gen':
cache = spDcache(iv, w)
self.mi, self.ak, self.p = akTest(iv, w, cache)
elif case == 'nosp':
cache = spDcache(iv, w)
self.mi = get_mI(iv, w, cache)
self.ak, self.p = lmErr(iv, w, cache)
else:
print """\n
Fix the optional argument 'case' to match the requirements:
            * 'gen': General case (spatial lag + endogenous regressors)
            * 'nosp': No spatial lag, endogenous regressors only
\n"""
class spDcache:
"""
Helper class to compute reusable pieces in the spatial diagnostics module
...
Parameters
----------
reg : OLS_dev, TSLS_dev, STSLS_dev
Instance from a regression class
w : W
Spatial weights instance
Attributes
----------
j : array
1x1 array with the result from:
.. math::
J = \dfrac{1}{[(WX\beta)' M (WX\beta) + T \sigma^2]}
wu : array
nx1 array with spatial lag of the residuals
utwuDs : array
1x1 array with the result from:
.. math::
utwuDs = \dfrac{u' W u}{\tilde{\sigma^2}}
utwyDs : array
1x1 array with the result from:
.. math::
utwyDs = \dfrac{u' W y}{\tilde{\sigma^2}}
t : array
1x1 array with the result from :
.. math::
T = tr[(W' + W) W]
trA : float
Trace of A as in Cliff & Ord (1981)
"""
def __init__(self, reg, w):
self.reg = reg
self.w = w
self._cache = {}
@property
def j(self):
if 'j' not in self._cache:
wxb = self.w.sparse * self.reg.predy
wxb2 = np.dot(wxb.T, wxb)
xwxb = spdot(self.reg.x.T, wxb)
num1 = wxb2 - np.dot(xwxb.T, np.dot(self.reg.xtxi, xwxb))
num = num1 + (self.t * self.reg.sig2n)
den = self.reg.n * self.reg.sig2n
self._cache['j'] = num / den
return self._cache['j']
@property
def wu(self):
if 'wu' not in self._cache:
self._cache['wu'] = self.w.sparse * self.reg.u
return self._cache['wu']
@property
def utwuDs(self):
if 'utwuDs' not in self._cache:
res = np.dot(self.reg.u.T, self.wu) / self.reg.sig2n
self._cache['utwuDs'] = res
return self._cache['utwuDs']
@property
def utwyDs(self):
if 'utwyDs' not in self._cache:
res = np.dot(self.reg.u.T, self.w.sparse * self.reg.y)
self._cache['utwyDs'] = res / self.reg.sig2n
return self._cache['utwyDs']
@property
def t(self):
if 't' not in self._cache:
prod = (self.w.sparse.T + self.w.sparse) * self.w.sparse
self._cache['t'] = np.sum(prod.diagonal())
return self._cache['t']
@property
def trA(self):
if 'trA' not in self._cache:
xtwx = spdot(self.reg.x.T, spdot(self.w.sparse, self.reg.x))
mw = np.dot(self.reg.xtxi, xtwx)
self._cache['trA'] = np.sum(mw.diagonal())
return self._cache['trA']
@property
def AB(self):
"""
Computes A and B matrices as in Cliff-Ord 1981, p. 203
"""
if 'AB' not in self._cache:
U = (self.w.sparse + self.w.sparse.T) / 2.
z = spdot(U, self.reg.x, array_out=False)
c1 = spdot(self.reg.x.T, z, array_out=False)
c2 = spdot(z.T, z, array_out=False)
G = self.reg.xtxi
A = spdot(G, c1)
B = spdot(G, c2)
self._cache['AB'] = [A, B]
return self._cache['AB']
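def _spdcache_sketch(ols, w):
    # Added illustrative note, not part of the original module: spDcache
    # memoizes every derived quantity in self._cache, so a battery of
    # diagnostics over the same regression pays each sparse-matrix cost once.
    cache = spDcache(ols, w)
    t_first = cache.t    # computed and stored in the cache
    t_again = cache.t    # returned from the cache, no recomputation
    return t_first is t_again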
def lmErr(reg, w, spDcache):
"""
LM error test. Implemented as presented in eq. (9) of Anselin et al.
(1996) [1]_
...
    Parameters
----------
reg : OLS_dev, TSLS_dev, STSLS_dev
Instance from a regression class
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
lme : tuple
Pair of statistic and p-value for the LM error test.
References
----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
diagnostic tests for spatial dependence". Regional Science and Urban
Economics, 26, 77-104.
"""
lm = spDcache.utwuDs ** 2 / spDcache.t
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def lmLag(ols, w, spDcache):
"""
LM lag test. Implemented as presented in eq. (13) of Anselin et al.
(1996) [1]_
...
    Parameters
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
lml : tuple
Pair of statistic and p-value for the LM lag test.
References
----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
diagnostic tests for spatial dependence". Regional Science and Urban
Economics, 26, 77-104.
"""
lm = spDcache.utwyDs ** 2 / (ols.n * spDcache.j)
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def rlmErr(ols, w, spDcache):
"""
Robust LM error test. Implemented as presented in eq. (8) of Anselin et al. (1996) [1]_
NOTE: eq. (8) has an errata, the power -1 in the denominator should be inside the square bracket.
...
    Parameters
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
rlme : tuple
Pair of statistic and p-value for the Robust LM error test.
References
----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
diagnostic tests for spatial dependence". Regional Science and Urban
Economics, 26, 77-104.
"""
nj = ols.n * spDcache.j
num = (spDcache.utwuDs - (spDcache.t * spDcache.utwyDs) / nj) ** 2
den = spDcache.t * (1. - (spDcache.t / nj))
lm = num / den
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def rlmLag(ols, w, spDcache):
"""
Robust LM lag test. Implemented as presented in eq. (12) of Anselin et al.
(1996) [1]_
...
    Parameters
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
rlml : tuple
Pair of statistic and p-value for the Robust LM lag test.
References
----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
diagnostic tests for spatial dependence". Regional Science and Urban
Economics, 26, 77-104.
"""
lm = (spDcache.utwyDs - spDcache.utwuDs) ** 2 / \
((ols.n * spDcache.j) - spDcache.t)
pval = chisqprob(lm, 1)
return (lm[0][0], pval[0][0])
def lmSarma(ols, w, spDcache):
"""
    LM SARMA test. Implemented as presented in eq. (15) of Anselin et al.
(1996) [1]_
...
    Parameters
----------
ols : OLS_dev
Instance from an OLS_dev regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
sarma : tuple
Pair of statistic and p-value for the LM sarma test.
References
----------
    .. [1] Anselin, L., Bera, A. K., Florax, R., Yoon, M. J. (1996) "Simple
diagnostic tests for spatial dependence". Regional Science and Urban
Economics, 26, 77-104.
"""
first = (spDcache.utwyDs - spDcache.utwuDs) ** 2 / \
(w.n * spDcache.j - spDcache.t)
secnd = spDcache.utwuDs ** 2 / spDcache.t
lm = first + secnd
pval = chisqprob(lm, 2)
return (lm[0][0], pval[0][0])
def get_mI(reg, w, spDcache):
"""
Moran's I statistic of spatial autocorrelation as showed in Cliff & Ord
(1981) [CO81]_, p. 201-203
...
    Parameters
----------
reg : OLS_dev, TSLS_dev, STSLS_dev
Instance from a regression class
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
Returns
-------
moran : float
                  Moran's I test statistic.
References
----------
.. [CO81] Cliff, AD., Ord, JK. (1981) "Spatial processes: models & applications".
Pion London
"""
mi = (w.n * np.dot(reg.u.T, spDcache.wu)) / (w.s0 * reg.utu)
return mi[0][0]
def get_vI(ols, w, ei, spDcache):
"""
Moran's I variance coded as in Cliff & Ord 1981 (p. 201-203) and R's spdep
"""
A = spDcache.AB[0]
trA2 = np.dot(A, A)
trA2 = np.sum(trA2.diagonal())
B = spDcache.AB[1]
trB = np.sum(B.diagonal()) * 4.
vi = (w.n ** 2 / (w.s0 ** 2 * (w.n - ols.k) * (w.n - ols.k + 2.))) * \
(w.s1 + 2. * trA2 - trB -
((2. * (spDcache.trA ** 2)) / (w.n - ols.k)))
return vi
def get_eI(ols, w, spDcache):
"""
Moran's I expectation using matrix M
"""
return - (w.n * spDcache.trA) / (w.s0 * (w.n - ols.k))
def get_zI(I, ei, vi):
"""
Standardized I
Returns two-sided p-values as provided in the GeoDa family
"""
z = abs((I - ei) / np.sqrt(vi))
pval = norm.sf(z) * 2.
return (z, pval)
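def _moran_helpers_sketch(ols, w):
    # Added illustrative sketch, not part of the original module: chaining the
    # helper functions above reproduces what MoranRes computes internally,
    # assuming `ols` and `w` are built as in the doctests.
    cache = spDcache(ols, w)
    I = get_mI(ols, w, cache)
    eI = get_eI(ols, w, cache)
    vI = get_vI(ols, w, eI, cache)
    zI, p_norm = get_zI(I, eI, vI)
    return I, eI, vI, zI, p_norm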
def akTest(iv, w, spDcache):
"""
Computes AK-test for the general case (end. reg. + sp. lag)
...
Parameters
----------
iv : STSLS_dev
Instance from spatial 2SLS regression
w : W
Spatial weights instance
spDcache : spDcache
Instance of spDcache class
    Returns
----------
mi : float
Moran's I statistic for IV residuals
ak : float
Square of corrected Moran's I for residuals::
.. math::
ak = \dfrac{N \times I^*}{\phi^2}
p : float
P-value of the test
ToDo:
* Code in as Nancy
* Compare both
"""
mi = get_mI(iv, w, spDcache)
# Phi2
etwz = spdot(iv.u.T, spdot(w.sparse, iv.z))
a = np.dot(etwz, np.dot(iv.varb, etwz.T))
s12 = (w.s0 / w.n) ** 2
phi2 = (spDcache.t + (4.0 / iv.sig2n) * a) / (s12 * w.n)
ak = w.n * mi ** 2 / phi2
pval = chisqprob(ak, 1)
return (mi, ak[0][0], pval[0][0])
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
spreg-git/pysal
|
pysal/spreg/diagnostics_sp.py
|
Python
|
bsd-3-clause
| 23,989
|
[
"COLUMBUS"
] |
8bf4370d3f9820d9687a97b913da2a47c4cc93cee219f98140b5d03476a98b1b
|
# This file is part of cclib (http://cclib.sf.net), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
__revision__ = "$Revision$"
import logging
import numpy
from .calculationmethod import Method
class Population(Method):
"""A base class for all population-type methods."""
def __init__(self, data, progress=None, \
loglevel=logging.INFO, logname="Log"):
# Call the __init__ method of the superclass.
super(Population, self).__init__(data, progress, loglevel, logname)
self.fragresults = None
def __str__(self):
"""Return a string representation of the object."""
return "Population"
def __repr__(self):
"""Return a representation of the object."""
return "Population"
def partition(self, indices=None):
if not hasattr(self, "aoresults"):
self.calculate()
if not indices:
# Build list of groups of orbitals in each atom for atomresults.
if hasattr(self.data, "aonames"):
names = self.data.aonames
elif hasattr(self.data, "fonames"):
names = self.data.fonames
atoms = []
indices = []
name = names[0].split('_')[0]
atoms.append(name)
indices.append([0])
for i in range(1, len(names)):
name = names[i].split('_')[0]
try:
index = atoms.index(name)
except ValueError: #not found in atom list
atoms.append(name)
indices.append([i])
else:
indices[index].append(i)
natoms = len(indices)
nmocoeffs = len(self.aoresults[0])
        # Build results numpy arrays.
alpha = len(self.aoresults[0])
results = []
results.append(numpy.zeros([alpha, natoms], "d"))
if len(self.aoresults) == 2:
beta = len(self.aoresults[1])
results.append(numpy.zeros([beta, natoms], "d"))
        # For each spin, slice the numpy array at each ao index,
        # and add to the correct result column.
for spin in range(len(results)):
            for i in range(natoms):  # For each group (atom).
                for j in range(len(indices[i])):  # For each orbital in the group.
temp = self.aoresults[spin][:, indices[i][j]]
results[spin][:, i] = numpy.add(results[spin][:, i], temp)
self.logger.info("Saving partitioned results in fragresults: [array[2]]")
self.fragresults = results
return True
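def _population_sketch(method):
    # Added illustrative sketch, not part of the original module: `method` is
    # assumed to be an instance of a concrete Population subclass (one whose
    # calculate() fills self.aoresults, e.g. a Mulliken-style analysis).
    # partition() calls calculate() itself if needed, then regroups the
    # per-orbital results by atom.
    method.partition()
    return method.fragresults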
if __name__ == "__main__":
import doctest, population
doctest.testmod(population, verbose=False)
|
Clyde-fare/cclib_bak
|
src/cclib/method/population.py
|
Python
|
lgpl-2.1
| 3,224
|
[
"cclib"
] |
2a83d8a195f26b9ad4d810087d9e42420bc745f81c9fb77f8407e36d43d99578
|
# coding=utf-8
from __future__ import division
import numpy as _np
import scipy.stats as _stats
from scipy.signal import gaussian as _gaussian, filtfilt as _filtfilt, filter_design as _filter_design, \
deconvolve as _deconvolve, firwin as _firwin, convolve as _convolve
from matplotlib.pyplot import plot as _plot
from ..BaseFilter import Filter as _Filter
from ..Signal import EvenlySignal as _EvenlySignal, UnevenlySignal as _UnevenlySignal
from ..Utility import abstractmethod as _abstract
from ..tools.Tools import SignalRange
__author__ = 'AleB'
class Normalize(_Filter):
"""
    Normalize the input signal using the general formula: ( signal - BIAS ) / RANGE
Parameters
-------------------
norm_method :
Method for the normalization. Available methods are:
* 'mean' - remove the mean [ BIAS = mean(signal); RANGE = 1 ]
* 'standard' - standardization [ BIAS = mean(signal); RANGE = std(signal) ]
* 'min' - remove the minimum [ BIAS = min(signal); RANGE = 1 ]
        * 'maxmin' - maxmin normalization [ BIAS = min(signal); RANGE = ( max(signal) - min(signal) ) ]
* 'custom' - custom, bias and range are manually defined [ BIAS = bias, RANGE = range ]
norm_bias : float, default = 0
Bias for custom normalization
norm_range : float, !=0, default = 1
Range for custom normalization
Returns
-------
signal:
The normalized signal.
"""
def __init__(self, norm_method='standard', norm_bias=0, norm_range=1):
assert norm_method in ['mean', 'standard', 'min', 'maxmin', 'custom'],\
"norm_method must be one of 'mean', 'standard', 'min', 'maxmin', 'custom'"
if norm_method == "custom":
assert norm_range != 0, "norm_range must not be zero"
_Filter.__init__(self, norm_method=norm_method, norm_bias=norm_bias, norm_range=norm_range)
@classmethod
def algorithm(cls, signal, params):
from ..indicators.TimeDomain import Mean as _Mean, StDev as _StDev
method = params['norm_method']
if method == "mean":
return signal - _Mean()(signal)
elif method == "standard":
return (signal - _Mean()(signal)) / _StDev()(signal)
elif method == "min":
return signal - _np.min(signal)
elif method == "maxmin":
return (signal - _np.min(signal)) / (_np.max(signal) - _np.min(signal))
elif method == "custom":
return (signal - params['norm_bias']) / params['norm_range']
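def _normalize_sketch(sig):
    # Added illustrative sketch, not part of the original module: filters in
    # this library are applied by calling the instance on a signal, as done
    # with ConvolutionalFilter inside DenoiseEDA below. `sig` is assumed to
    # be an EvenlySignal.
    standardized = Normalize(norm_method='standard')(sig)
    rescaled = Normalize(norm_method='custom', norm_bias=500, norm_range=100)(sig)
    return standardized, rescaled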
class IIRFilter(_Filter):
"""
Filter the input signal using an Infinite Impulse Response filter.
Parameters
----------
fp : list or float
The pass frequencies
fs : list or float
The stop frequencies
Optional parameters
-------------------
loss : float, >0, default = 0.1
Loss tolerance in the pass band
att : float, >0, default = 40
Minimum attenuation required in the stop band.
ftype : str, default = 'butter'
Type of filter. Available types: 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel'
Returns
-------
signal : EvenlySignal
Filtered signal
Notes
-----
This is a wrapper of *scipy.signal.filter_design.iirdesign*. Refer to `scipy.signal.filter_design.iirdesign`
for additional information
"""
def __init__(self, fp, fs, loss=.1, att=40, ftype='butter'):
assert loss > 0, "Loss value should be positive"
assert att > 0, "Attenuation value should be positive"
assert att > loss, "Attenuation value should be greater than loss value"
assert ftype in ['butter', 'cheby1', 'cheby2', 'ellip', 'bessel'],\
"Filter type must be in ['butter', 'cheby1', 'cheby2', 'ellip', 'bessel']"
_Filter.__init__(self, fp=fp, fs=fs, loss=loss, att=att, ftype=ftype)
@classmethod
def algorithm(cls, signal, params):
fsamp = signal.get_sampling_freq()
fp, fs, loss, att, ftype = params["fp"], params["fs"], params["loss"], params["att"], params["ftype"]
if isinstance(signal, _UnevenlySignal):
cls.warn('Filtering Unevenly signal is undefined. Returning original signal.')
return signal
nyq = 0.5 * fsamp
fp = _np.array(fp)
fs = _np.array(fs)
wp = fp / nyq
ws = fs / nyq
# noinspection PyTupleAssignmentBalance
b, a = _filter_design.iirdesign(wp, ws, loss, att, ftype=ftype, output="ba")
sig_filtered = signal.clone_properties(_filtfilt(b, a, signal.get_values()))
if _np.isnan(sig_filtered[0]):
cls.warn('Filter parameters allow no solution. Returning original signal.')
return signal
else:
return sig_filtered
@_abstract
def plot(self):
pass
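def _iir_sketch(sig):
    # Added illustrative sketch, not part of the original module: a band-pass
    # between 0.5 and 45 Hz on an EvenlySignal `sig` (band edges are
    # hypothetical); fp and fs are given in Hz and normalized to the Nyquist
    # frequency inside the filter.
    return IIRFilter(fp=[0.5, 45], fs=[0.1, 50], ftype='butter')(sig)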
class FIRFilter(_Filter):
"""
Filter the input signal using a Finite Impulse Response filter.
Parameters
----------
fp : list or float
The pass frequencies
fs : list or float
The stop frequencies
Optional parameters
-------------------
loss : float, >0, default = 0.1
Loss tolerance in the pass band
att : float, >0, default = 40
Minimum attenuation required in the stop band.
wtype : str, default = 'hamming'
Type of filter. Available types: 'hamming'
Returns
-------
signal : EvenlySignal
Filtered signal
Notes
-----
This is a wrapper of *scipy.signal.firwin*. Refer to `scipy.signal.firwin`
for additional information
"""
def __init__(self, fp, fs, loss=0.1, att=40, wtype='hamming'):
assert loss > 0, "Loss value should be positive"
assert att > 0, "Attenuation value should be positive"
assert att > loss, "Attenuation value should be greater than loss value"
assert wtype in ['hamming'],\
"Window type must be in ['hamming']"
_Filter.__init__(self, fp=fp, fs=fs, loss=loss, att=att, wtype=wtype)
@classmethod
def algorithm(cls, signal, params):
fsamp = signal.get_sampling_freq()
fp, fs, loss, att, wtype = params["fp"], params["fs"], params["loss"], params["att"], params["wtype"]
if isinstance(signal, _UnevenlySignal):
cls.warn('Filtering Unevenly signal is undefined. Returning original signal.')
return signal
fp = _np.array(fp)
fs = _np.array(fs)
if att>0:
att = -att
d1 = 10**(loss/10)
d2 = 10**(att/10)
Dsamp = _np.min(abs(fs-fp))/fsamp
# from https://dsp.stackexchange.com/questions/31066/how-many-taps-does-an-fir-filter-need
N = int(2/3*_np.log10(1/(10*d1*d2))*fsamp/Dsamp)
        # fp and fs are arrays at this point; the filter passes DC unless the
        # pass band starts above the stop band
        pass_zero = fp[0] <= fs[0]
nyq = 0.5 * fsamp
fp = _np.array(fp)
wp = fp / nyq
if N%2 ==0:
N+=1
b = _firwin(N, wp, width=Dsamp, window=wtype, pass_zero=pass_zero)
sig_filtered = signal.clone_properties(_convolve(signal.get_values(), b, mode='same'))
if _np.isnan(sig_filtered[0]):
cls.warn('Filter parameters allow no solution. Returning original signal.')
return signal
else:
return sig_filtered
@_abstract
def plot(self):
pass
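def _fir_sketch(sig):
    # Added illustrative sketch, not part of the original module: a low-pass
    # FIR with pass band up to 40 Hz and stop band from 45 Hz (hypothetical
    # band edges); the number of taps is derived from the loss/attenuation
    # specification as in the formula above.
    return FIRFilter(fp=[40], fs=[45])(sig)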
class KalmanFilter(_Filter):
    """
    Smooth the input signal with a 1-D Kalman filter. R is the variance of
    the measurement noise; the process noise Q is estimated from the data as
    the median of the local signal range (windows of length win_len, step
    win_step) divided by ratio. Returns the filtered signal.
    """
    def __init__(self, R, ratio=1, win_len=1, win_step=0.5):
        assert R > 0, "R should be positive"
        if ratio is not None:
            assert ratio >= 1, "ratio should be >= 1"
        assert win_len > 0, "Window length value should be positive"
        assert win_step > 0, "Window step value should be positive"
        _Filter.__init__(self, R=R, ratio=ratio, win_len=win_len, win_step=win_step)
@classmethod
def algorithm(cls, signal, params):
R = params['R']
ratio = params['ratio']
win_len = params['win_len']
win_step = params['win_step']
sz = len(signal)
rr = SignalRange(win_len, win_step)(signal)
Q = _np.nanmedian(rr)/ratio
P = 1
x_out = signal.get_values().copy()
for k in range(1,sz):
x_ = x_out[k-1]
P_ = P + Q
# measurement update
K = P_ / (P_ + R)
x_out[k] = x_ + K * (x_out[k] - x_)
P = (1 - K ) * P_
x_out = signal.clone_properties(x_out)
return(x_out)
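def _kalman_sketch(sig):
    # Added illustrative sketch, not part of the original module: smooth a
    # noisy EvenlySignal; R here is a hypothetical measurement-noise variance,
    # while Q is derived internally from the local signal range.
    return KalmanFilter(R=0.5, ratio=2)(sig)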
############
class ImputeNAN(_Filter):
    """
    Impute NaN segments: each gap is filled with a local linear trend plus
    Gaussian noise estimated from windows of length win_len around the gap;
    allnan selects the output ('nan' or 'zeros') when the whole signal is NaN.
    """
    def __init__(self, win_len=5, allnan='nan'):
        assert win_len > 0, "win_len should be >0"
        assert allnan in ['zeros', 'nan'], "allnan should be 'zeros' or 'nan'"
        _Filter.__init__(self, win_len=win_len, allnan=allnan)
@classmethod
def algorithm(cls, signal, params):
def group_consecutives(vals, step=1):
"""Return list of consecutive lists of numbers from vals (number list)."""
run = []
result = [run]
expect = None
for v in vals:
if (v == expect) or (expect is None):
run.append(v)
else:
run = [v]
result.append(run)
expect = v + step
return result
#%
win_len = params['win_len']*signal.get_sampling_freq()
allnan = params['allnan']
s = signal.get_values().copy()
if _np.isnan(s).all():
if allnan == 'nan':
return(signal)
else:
s = _np.zeros_like(s)
s_out = signal.clone_properties(s)
return(s_out)
idx_nan = _np.where(_np.isnan(s))[0]
segments = group_consecutives(idx_nan)
#%
if len(segments[0])>=1:
for i_seg, SEG in enumerate(segments):
idx_st = SEG[0]
idx_sp = SEG[-1]
idx_win_pre = _np.arange(-int(win_len/2), 0, 1)+idx_st
idx_win_pre = idx_win_pre[_np.where(idx_win_pre>0)[0]] #not before signal start
STD = []
if len(idx_win_pre)>=3:
STD.append(_np.nanstd(s[idx_win_pre]))
idx_win_post = _np.arange(0, int(win_len/2))+idx_sp+1
idx_win_post = idx_win_post[_np.where(idx_win_post<len(s))[0]]
if len(idx_win_post)>=3:
STD.append(_np.nanstd(s[idx_win_post]))
if len(STD)>0 and not (_np.isnan(STD).all()):
STD = _np.nanmin(STD)
else:
STD = 0
idx_win = _np.hstack([idx_win_pre, idx_win_post]).astype(int)
idx_win = idx_win[_np.where(~_np.isnan(s[idx_win]))[0]] # remove nans
if len(idx_win)>3:
R = _stats.linregress(idx_win, s[idx_win])
s_nan = _np.array(SEG)*R[0]+R[1] + _np.random.normal(scale=STD, size = len(SEG))
else:
s_nan = _np.nanmean(s)*_np.ones(len(SEG))
s[SEG] = s_nan
signal_out = signal.clone_properties(s)
return(signal_out)
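def _impute_sketch(sig):
    # Added illustrative sketch, not part of the original module: fill NaN
    # gaps in an EvenlySignal using context windows of length 5 on each side
    # of every gap.
    return ImputeNAN(win_len=5)(sig)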
class RemoveSpikes(_Filter):
    """
    Detect and remove spikes: samples where the absolute N-lag difference
    exceeds K times its mean are marked (optionally dilated by dilate
    seconds), then corrected by linear interpolation across each spike
    ('linear') or by removing a fraction D of the spike step ('step').
    """
    def __init__(self, K=2, N=1, dilate=0, D=0.95, method='step'):
        assert K > 0, "K should be positive"
        assert isinstance(N, int) and N > 0, "N value not valid"
        assert dilate >= 0, "dilate should be >= 0.0"
        assert D >= 0, "D should be >= 0.0"
        assert method in ['linear', 'step'], "method should be 'linear' or 'step'"
        _Filter.__init__(self, K=K, N=N, dilate=dilate, D=D, method=method)
@classmethod
def algorithm(cls, signal, params):
K = params['K']
N = params['N']
dilate = params['dilate']
D = params['D']
method = params['method']
fs = signal.get_sampling_freq()
sig_diff = abs(signal[N:] - signal[:-N])
ds_mean = _np.nanmean(sig_diff)
idx_spikes = _np.where(sig_diff>K*ds_mean)[0]+N//2
spikes = _np.zeros(len(signal))
spikes[idx_spikes] = 1
win = _np.ones(1+int(2*dilate*fs))
spikes = _np.convolve(spikes, win, 'same')
idx_spikes = _np.where(spikes>0)[0]
x_out = signal.get_values().copy()
        # linear connector method: bridge each spike with a straight segment
if method == 'linear':
diff_idx_spikes = _np.diff(idx_spikes)
new_spike = _np.where(diff_idx_spikes > 1)[0] + 1
new_spike = _np.r_[0, new_spike, -1]
for I in range(len(new_spike)-1):
IDX_START = idx_spikes[new_spike[I]] -1
IDX_STOP = idx_spikes[new_spike[I+1]-1] +1
L = IDX_STOP - IDX_START + 1
x_start = x_out[IDX_START]
x_stop = x_out[IDX_STOP]
coefficient = (x_stop - x_start)/ L
x_out[IDX_START:IDX_STOP+1] = coefficient*_np.arange(L) + x_start
else:
for IDX in idx_spikes:
delta = x_out[IDX] - x_out[IDX-1]
x_out[IDX:] = x_out[IDX:] - D*delta
x_out = signal.clone_properties(x_out)
return(x_out)
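def _despike_sketch(sig):
    # Added illustrative sketch, not part of the original module: detect
    # spikes at twice the mean absolute difference and bridge them linearly.
    return RemoveSpikes(K=2, method='linear')(sig)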
class DenoiseEDA(_Filter):
"""
Remove noise due to sensor displacement from the EDA signal.
Parameters
----------
threshold : float, >0
Threshold to detect the noise
Optional parameters
-------------------
win_len : float, >0, default = 2
Length of the window
Returns
-------
signal : EvenlySignal
De-noised signal
"""
def __init__(self, threshold, win_len=2):
assert threshold > 0, "Threshold value should be positive"
assert win_len > 0, "Window length value should be positive"
_Filter.__init__(self, threshold=threshold, win_len=win_len)
@classmethod
def algorithm(cls, signal, params):
threshold = params['threshold']
win_len = params['win_len']
        # remove fluctuations
noise = ConvolutionalFilter(irftype='triang', win_len=win_len, normalize=True)(abs(_np.diff(signal)))
# identify noisy portions
idx_ok = _np.where(noise <= threshold)[0]
# fix start and stop of the signal for the following interpolation
if idx_ok[0] != 0:
idx_ok = _np.r_[0, idx_ok].astype(int)
if idx_ok[-1] != len(signal) - 1:
idx_ok = _np.r_[idx_ok, len(signal) - 1].astype(int)
denoised = _UnevenlySignal(signal[idx_ok], signal.get_sampling_freq(), x_values=idx_ok, x_type='indices',
duration=signal.get_duration())
# interpolation
signal_out = denoised.to_evenly('linear')
return signal_out
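def _denoise_eda_sketch(eda):
    # Added illustrative sketch, not part of the original module: remove
    # sensor-displacement noise from an EDA EvenlySignal; the threshold (a
    # hypothetical value here) is compared against the smoothed absolute
    # first difference of the signal.
    return DenoiseEDA(threshold=0.2)(eda)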
class ConvolutionalFilter(_Filter):
"""
Filter a signal by convolution with a given impulse response function (IRF).
Parameters
----------
irftype : str
Type of IRF to be generated. 'gauss', 'rect', 'triang', 'dgauss', 'custom'.
    win_len : float, >0 (> 8/fsamp for 'gauss')
Duration of the generated IRF in seconds (if irftype is not 'custom')
Optional parameters
-------------------
irf : numpy.array
IRF to be used if irftype is 'custom'
normalize : boolean, default = True
        Whether to normalize the IRF to have unitary area
Returns
-------
signal : EvenlySignal
Filtered signal
"""
def __init__(self, irftype, win_len=0, irf=None, normalize=True):
assert irftype in ['gauss', 'rect', 'triang', 'dgauss', 'custom'],\
"IRF type must be in ['gauss', 'rect', 'triang', 'dgauss', 'custom']"
assert irftype == 'custom' or win_len > 0, "Window length value should be positive"
_Filter.__init__(self, irftype=irftype, win_len=win_len, irf=irf, normalize=normalize)
# TODO (Andrea): TEST normalization and results
@classmethod
def algorithm(cls, signal, params):
irftype = params["irftype"]
normalize = params["normalize"]
fsamp = signal.get_sampling_freq()
irf = None
if irftype == 'custom':
if 'irf' not in params:
cls.error("'irf' parameter missing.")
return signal
else:
irf = _np.array(params["irf"])
n = len(irf)
else:
if 'win_len' not in params:
cls.error("'win_len' parameter missing.")
return signal
else:
n = int(params['win_len'] * fsamp)
if irftype == 'gauss':
if n < 8:
# TODO (Andrea): test, sometimes it returns nan
cls.error(
"'win_len' too short to generate a gaussian IRF, expected > " + str(_np.ceil(8 / fsamp)))
std = _np.floor(n / 8)
irf = _gaussian(n, std)
elif irftype == 'rect':
irf = _np.ones(n)
elif irftype == 'triang':
irf_1 = _np.arange(n // 2)
irf_2 = irf_1[-1] - _np.arange(n // 2)
if n % 2 == 0:
irf = _np.r_[irf_1, irf_2]
else:
irf = _np.r_[irf_1, irf_1[-1] + 1, irf_2]
elif irftype == 'dgauss':
std = _np.round(n / 8)
g = _gaussian(n, std)
irf = _np.diff(g)
# NORMALIZE
if normalize:
irf = irf / _np.sum(irf)
signal_ = _np.r_[_np.ones(n) * signal[0], signal, _np.ones(n) * signal[-1]] # TESTME
signal_f = _np.convolve(signal_, irf, mode='same')
signal_out = signal.clone_properties(signal_f[n:-n])
return signal_out
@classmethod
def plot(cls):
pass
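def _convfilter_sketch(sig):
    # Added illustrative sketch, not part of the original module: 2-second
    # triangular smoothing of an EvenlySignal, the same construction that
    # DenoiseEDA uses above.
    return ConvolutionalFilter(irftype='triang', win_len=2)(sig)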
class DeConvolutionalFilter(_Filter):
"""
Filter a signal by deconvolution with a given impulse response function (IRF).
Parameters
----------
irf : numpy.array
IRF used to deconvolve the signal
Optional parameters
-------------------
normalize : boolean, default = True
Whether to normalize the IRF to have unitary area
deconv_method : str, default = 'sps'
        Available methods: 'fft', 'sps'. 'fft' uses the Fourier transform, 'sps' uses the scipy.signal.deconvolve
function
Returns
-------
signal : EvenlySignal
Filtered signal
"""
def __init__(self, irf, normalize=True, deconv_method='sps'):
# TODO (Andrea): "check that irf[0]>0 to avoid scipy BUG" is it normal? Need to put a check?
assert deconv_method in ['fft', 'sps'], "Deconvolution method not valid"
_Filter.__init__(self, irf=irf, normalize=normalize, deconv_method=deconv_method)
@classmethod
def algorithm(cls, signal, params):
irf = params["irf"]
normalize = params["normalize"]
deconvolution_method = params["deconv_method"]
if normalize:
irf = irf / _np.sum(irf)
if deconvolution_method == 'fft':
l = len(signal)
fft_signal = _np.fft.fft(signal, n=l)
fft_irf = _np.fft.fft(irf, n=l)
out = _np.fft.ifft(fft_signal / fft_irf)
elif deconvolution_method == 'sps':
cls.warn('sps based deconvolution needs to be tested. Use carefully.')
out, _ = _deconvolve(signal, irf)
else:
cls.error('Deconvolution method not implemented. Returning original signal.')
out = signal.get_values()
out_signal = signal.clone_properties(abs(out))
return out_signal
def plot(self):
_plot(self._params['irf'])
|
MPBA/pyHRV
|
pyphysio/filters/Filters.py
|
Python
|
gpl-3.0
| 20,241
|
[
"Gaussian"
] |
f5402789aa80d5029e7717cb30ad71f69a7db320cc8e3ca442ad921b8e2d52ec
|
from __future__ import with_statement
import logging
import multiprocessing
import re
import sys
import threading
import time
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
from test.test_support import run_unittest
except ImportError:
from test.support import run_unittest
if sys.version_info < (3, 0):
next = lambda x: x.next()
if sys.platform.startswith('win'):
import ctypes
import ctypes.wintypes
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
LOGGER, STDERR_HANDLER)
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
class Call(object):
"""A call that can be submitted to a future.Executor for testing.
The call signals when it is called and waits for an event before finishing.
"""
CALL_LOCKS = {}
def _create_event(self):
if sys.platform.startswith('win'):
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [("nLength", ctypes.wintypes.DWORD),
("lpSecurityDescriptor", ctypes.wintypes.LPVOID),
("bInheritHandle", ctypes.wintypes.BOOL)]
s = SECURITY_ATTRIBUTES()
s.nLength = ctypes.sizeof(s)
s.lpSecurityDescriptor = None
s.bInheritHandle = True
handle = ctypes.windll.kernel32.CreateEventA(ctypes.pointer(s),
True,
False,
None)
assert handle is not None
return handle
else:
event = multiprocessing.Event()
self.CALL_LOCKS[id(event)] = event
return id(event)
def _wait_on_event(self, handle):
if sys.platform.startswith('win'):
r = ctypes.windll.kernel32.WaitForSingleObject(handle, 5 * 1000)
assert r == 0
else:
self.CALL_LOCKS[handle].wait()
def _signal_event(self, handle):
if sys.platform.startswith('win'):
r = ctypes.windll.kernel32.SetEvent(handle)
assert r != 0
else:
self.CALL_LOCKS[handle].set()
def __init__(self, manual_finish=False, result=42):
self._called_event = self._create_event()
self._can_finish = self._create_event()
self._result = result
if not manual_finish:
self._signal_event(self._can_finish)
def wait_on_called(self):
self._wait_on_event(self._called_event)
def set_can(self):
self._signal_event(self._can_finish)
def __call__(self):
self._signal_event(self._called_event)
self._wait_on_event(self._can_finish)
return self._result
def close(self):
self.set_can()
if sys.platform.startswith('win'):
ctypes.windll.kernel32.CloseHandle(self._called_event)
ctypes.windll.kernel32.CloseHandle(self._can_finish)
else:
del self.CALL_LOCKS[self._called_event]
del self.CALL_LOCKS[self._can_finish]
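def _call_usage_sketch(executor):
    # Added illustrative sketch, not part of the original tests: Call lets a
    # test hold a submitted task at a known point until the test releases it.
    call = Call(manual_finish=True)
    try:
        future = executor.submit(call)
        call.wait_on_called()    # the task has started and is now blocked
        call.set_can()           # release it
        return future.result()   # 42, the default result
    finally:
        call.close()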
class ExceptionCall(Call):
def __call__(self):
self._signal_event(self._called_event)
self._wait_on_event(self._can_finish)
raise ZeroDivisionError()
class MapCall(Call):
def __init__(self, result=42):
super(MapCall, self).__init__(manual_finish=True, result=result)
def __call__(self, manual_finish):
if manual_finish:
super(MapCall, self).__call__()
return self._result
class ExecutorShutdownTest(unittest.TestCase):
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def _start_some_futures(self):
call1 = Call(manual_finish=True)
call2 = Call(manual_finish=True)
call3 = Call(manual_finish=True)
try:
self.executor.submit(call1)
self.executor.submit(call2)
self.executor.submit(call3)
call1.wait_on_called()
call2.wait_on_called()
call3.wait_on_called()
call1.set_can()
call2.set_can()
call3.set_can()
finally:
call1.close()
call2.close()
call3.close()
class ThreadPoolShutdownTest(ExecutorShutdownTest):
def setUp(self):
self.executor = futures.ThreadPoolExecutor(max_workers=5)
def tearDown(self):
self.executor.shutdown(wait=True)
def test_threads_terminate(self):
self._start_some_futures()
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def setUp(self):
self.executor = futures.ProcessPoolExecutor(max_workers=5)
def tearDown(self):
self.executor.shutdown(wait=True)
def test_processes_terminate(self):
self._start_some_futures()
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes:
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in self.executor._processes:
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
queue_management_thread.join()
for p in processes:
p.join()
class WaitTests(unittest.TestCase):
def test_first_completed(self):
def wait_test():
while not future1._waiters:
pass
call1.set_can()
call1 = Call(manual_finish=True)
call2 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
future2 = self.executor.submit(call2)
t = threading.Thread(target=wait_test)
t.start()
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEquals(set([future1]), done)
self.assertEquals(set([CANCELLED_FUTURE, future2]), not_done)
finally:
call1.close()
call2.close()
def test_first_completed_one_already_completed(self):
call1 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEquals(set([SUCCESSFUL_FUTURE]), finished)
self.assertEquals(set([future1]), pending)
finally:
call1.close()
def test_first_exception(self):
def wait_test():
while not future1._waiters:
pass
call1.set_can()
call2.set_can()
call1 = Call(manual_finish=True)
call2 = ExceptionCall(manual_finish=True)
call3 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
future2 = self.executor.submit(call2)
future3 = self.executor.submit(call3)
t = threading.Thread(target=wait_test)
t.start()
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEquals(set([future1, future2]), finished)
self.assertEquals(set([future3]), pending)
finally:
call1.close()
call2.close()
call3.close()
def test_first_exception_some_already_complete(self):
def wait_test():
while not future1._waiters:
pass
call1.set_can()
call1 = ExceptionCall(manual_finish=True)
call2 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
future2 = self.executor.submit(call2)
t = threading.Thread(target=wait_test)
t.start()
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEquals(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEquals(set([CANCELLED_FUTURE, future2]), pending)
finally:
call1.close()
call2.close()
def test_first_exception_one_already_failed(self):
call1 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEquals(set([EXCEPTION_FUTURE]), finished)
self.assertEquals(set([future1]), pending)
finally:
call1.close()
def test_all_completed(self):
def wait_test():
while not future1._waiters:
pass
call1.set_can()
call2.set_can()
call1 = Call(manual_finish=True)
call2 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
future2 = self.executor.submit(call2)
t = threading.Thread(target=wait_test)
t.start()
finished, pending = futures.wait(
[future1, future2],
return_when=futures.ALL_COMPLETED)
self.assertEquals(set([future1, future2]), finished)
self.assertEquals(set(), pending)
finally:
call1.close()
call2.close()
def test_all_completed_some_already_completed(self):
def wait_test():
while not future1._waiters:
pass
future4.cancel()
call1.set_can()
call2.set_can()
call3.set_can()
self.assertTrue(
futures.process.EXTRA_QUEUED_CALLS <= 1,
'this test assumes that future4 will be cancelled before it is '
'queued to run - which might not be the case if '
            'ProcessPoolExecutor is too aggressive in scheduling futures')
call1 = Call(manual_finish=True)
call2 = Call(manual_finish=True)
call3 = Call(manual_finish=True)
call4 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
future2 = self.executor.submit(call2)
future3 = self.executor.submit(call3)
future4 = self.executor.submit(call4)
t = threading.Thread(target=wait_test)
t.start()
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2, future3, future4],
return_when=futures.ALL_COMPLETED)
self.assertEquals(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2, future3, future4]),
finished)
self.assertEquals(set(), pending)
finally:
call1.close()
call2.close()
call3.close()
call4.close()
def test_timeout(self):
def wait_test():
while not future1._waiters:
pass
call1.set_can()
call1 = Call(manual_finish=True)
call2 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
future2 = self.executor.submit(call2)
t = threading.Thread(target=wait_test)
t.start()
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=1,
return_when=futures.ALL_COMPLETED)
self.assertEquals(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEquals(set([future2]), pending)
finally:
call1.close()
call2.close()
class ThreadPoolWaitTests(WaitTests):
def setUp(self):
self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class ProcessPoolWaitTests(WaitTests):
def setUp(self):
self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class AsCompletedTests(unittest.TestCase):
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
def wait_test():
while not future1._waiters:
pass
call1.set_can()
call2.set_can()
call1 = Call(manual_finish=True)
call2 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
future2 = self.executor.submit(call2)
t = threading.Thread(target=wait_test)
t.start()
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEquals(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
finally:
call1.close()
call2.close()
def test_zero_timeout(self):
call1 = Call(manual_finish=True)
try:
future1 = self.executor.submit(call1)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEquals(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
finally:
call1.close()
class ThreadPoolAsCompletedTests(AsCompletedTests):
def setUp(self):
self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class ProcessPoolAsCompletedTests(AsCompletedTests):
def setUp(self):
self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class ExecutorTest(unittest.TestCase):
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEquals(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEquals(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(next(i), (0, 1))
self.assertEqual(next(i), (0, 1))
self.assertRaises(ZeroDivisionError, next, i)
def test_map_timeout(self):
results = []
timeout_call = MapCall()
try:
try:
for i in self.executor.map(timeout_call,
[False, False, True],
timeout=1):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
finally:
timeout_call.close()
self.assertEquals([42, 42], results)
class ThreadPoolExecutorTest(ExecutorTest):
def setUp(self):
self.executor = futures.ThreadPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
def setUp(self):
self.executor = futures.ProcessPoolExecutor(max_workers=1)
def tearDown(self):
self.executor.shutdown(wait=True)
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
self.callback_result = None
def fn(callback_future):
self.callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEquals(5, self.callback_result)
def test_done_callback_with_exception(self):
self.callback_exception = None
def fn(callback_future):
self.callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEquals(('test',), self.callback_exception.args)
def test_done_callback_with_cancel(self):
self.was_cancelled = None
def fn(callback_future):
self.was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(self.was_cancelled)
def test_done_callback_raises(self):
LOGGER.removeHandler(STDERR_HANDLER)
logging_stream = StringIO()
handler = logging.StreamHandler(logging_stream)
LOGGER.addHandler(handler)
try:
self.raising_was_called = False
self.fn_was_called = False
def raising_fn(callback_future):
self.raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
self.fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(self.raising_was_called)
self.assertTrue(self.fn_was_called)
self.assertTrue('Exception: doh!' in logging_stream.getvalue())
finally:
LOGGER.removeHandler(handler)
LOGGER.addHandler(STDERR_HANDLER)
def test_done_callback_already_successful(self):
self.callback_result = None
def fn(callback_future):
self.callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEquals(5, self.callback_result)
def test_done_callback_already_failed(self):
self.callback_exception = None
def fn(callback_future):
self.callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEquals(('test',), self.callback_exception.args)
def test_done_callback_already_cancelled(self):
self.was_cancelled = None
def fn(callback_future):
self.was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(self.was_cancelled)
def test_repr(self):
self.assertTrue(re.match('<Future at 0x[0-9a-f]+L? state=pending>',
repr(PENDING_FUTURE)))
self.assertTrue(re.match('<Future at 0x[0-9a-f]+L? state=running>',
repr(RUNNING_FUTURE)))
self.assertTrue(re.match('<Future at 0x[0-9a-f]+L? state=cancelled>',
repr(CANCELLED_FUTURE)))
self.assertTrue(re.match('<Future at 0x[0-9a-f]+L? state=cancelled>',
repr(CANCELLED_AND_NOTIFIED_FUTURE)))
self.assertTrue(re.match(
'<Future at 0x[0-9a-f]+L? state=finished raised IOError>',
repr(EXCEPTION_FUTURE)))
self.assertTrue(re.match(
'<Future at 0x[0-9a-f]+L? state=finished returned int>',
repr(SUCCESSFUL_FUTURE)))
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=IOError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEquals(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEquals(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEquals(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEquals(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEquals(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEquals(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEquals(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
        # TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
IOError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = IOError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
def test_main():
run_unittest(ProcessPoolExecutorTest,
ThreadPoolExecutorTest,
ProcessPoolWaitTests,
ThreadPoolWaitTests,
ProcessPoolAsCompletedTests,
ThreadPoolAsCompletedTests,
FutureTests,
ProcessPoolShutdownTest,
ThreadPoolShutdownTest)
if __name__ == "__main__":
test_main()
|
santegoeds/pythonfutures
|
test_futures.py
|
Python
|
bsd-2-clause
| 27,721
|
[
"Brian"
] |
073654bcda284b0d4c72bafacdbbdaa3450cd4147b13e7712acb45e0710663c5
|
'''extractPairedReadsRegion.py
Usage:
extractPairedReadsRegion.py <region> <inbam> <outbam>
[--threads=<th>] [--memory=<mm>] [--nosort]
Options:
--threads=<th> Number of threads to use in sort [default: 1].
--memory=<mm> GB of memory per sort thread [default: 6].
--nosort Input BAM is already sorted by name.
'''
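# Example invocation (illustrative; region and file names are hypothetical):
#   python extractPairedReadsRegion.py chr1:10000-20000 input.bam region.bam \
#       --threads=4 --memory=8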
# Load required modules
from general_python import docopt
from ngs_python.bam.samtools import sort as bamsort
import os
import pysam
import subprocess
# Process arguments
args = docopt.docopt(__doc__,version = 'v1')
args['--threads'] = int(args['--threads'])
if args['--threads'] < 1:
raise ValueError('Must be at least 1 thread')
args['--memory'] = int(args['--memory'])
if args['--memory'] < 1:
raise ValueError('Must be at least 1 GB of memory')
args['<chrom>'], interval = args['<region>'].split(':')
args['<start>'], args['<end>'] = interval.split('-')
args['<start>'] = int(args['<start>']) - 1
args['<end>'] = int(args['<end>'])
if not args['<inbam>'].endswith('.bam'):
raise IOError('Unexpected input file name')
args['<inbam>'] = os.path.abspath(args['<inbam>'])
if not args['<outbam>'].endswith('.bam'):
raise IOError('Unexpected output file name')
args['<outbam>'] = os.path.abspath(args['<outbam>'])
# Name sort input BAM file if required
if args['--nosort']:
args['<nsortbam>'] = args['<inbam>']
else:
args['<nsortbam>'] = args['<outbam>'][:-4] + '.nsort.bam'
inputSortCommand = bamsort(
inFile=args['<inbam>'], outFile=args['<nsortbam>'], name=True,
threads=args['--threads'], memory=args['--memory'], delete=False
)
subprocess.check_call(inputSortCommand, shell=True)
# Create temporary output bam file and extract tid
args['<filtbam>'] = args['<outbam>'][:-4] + '.filtered.bam'
sortbam = pysam.AlignmentFile(args['<nsortbam>'], 'rb')
args['<tid>'] = sortbam.get_tid(args['<chrom>'])
filtbam = pysam.AlignmentFile(args['<filtbam>'], 'wb', sortbam)
# Extract reads from input BAM
currentName = ''
readList = []
while True:
# Extract read data
try:
nextRead = sortbam.next()
nextName = nextRead.query_name
except StopIteration:
nextName = None
# Process reads of the same name
if nextName != currentName:
# Determine if one read maps to region of interest
passed = False
for read in readList:
if read.is_unmapped:
continue
elif read.reference_id != args['<tid>']:
continue
elif read.reference_start >= args['<end>']:
continue
elif read.reference_end <= args['<start>']:
continue
else:
passed = True
break
# Write mapped reads to file
if passed:
for read in readList:
filtbam.write(read)
# Reset read list and current name
        if nextName is None:
break
else:
currentName = nextName
readList = [nextRead]
# Create list of reads of the same name
else:
readList.append(nextRead)
# Close BAM files
sortbam.close()
filtbam.close()
# Delete temporary name sorted bam
if not args['--nosort']:
os.remove(args['<nsortbam>'])
# Sort output bam file
outputSortCommand = bamsort(
inFile=args['<filtbam>'], outFile=args['<outbam>'], name=False,
threads=args['--threads'], memory=args['--memory'], delete=True
)
subprocess.check_call(outputSortCommand, shell=True)
|
adam-rabinowitz/ngs_analysis
|
scripts/BAM/extractPairedReadsRegion.py
|
Python
|
gpl-2.0
| 3,523
|
[
"pysam"
] |
1814015131af9a0c8745c97949a1e47991f0ccea5a801caab08076f06294b500
|
"""Perform a simultaneous fit to two frequency distributions
(= histograms) with common parameters with kafe2.MultiFit()
This example illustrates another common use-case for multifits,
where the same signal is measured under varying conditions,
e.g. in different detector regions with different resolutions
and background levels.
Consider the distribution of a signal on top of a flat background.
Additional smearing is added to the "true" data values. A second,
similar set of data at the same position and with the same width
is generated, albeit with a differing number of signal events,
smaller signal fraction and less resolution smearing.
A simultaneous fit using the kafe2 MultiFit feature is then performed
to extract the position and raw width common to the two data sets.
*Note*: in this simple case of two independent frequency distributions
the results for the common parameters could also be determined by
combination of the results from two individual fits to each of the
histograms (a minimal sketch of such a combination is given at the end of
this script).
"""
from kafe2 import Fit, Plot, HistContainer, MultiFit
import numpy as np
import matplotlib.pyplot as plt
# function to generate the signal-plus-background distributions
def generate_data(N, min, max, pos, width, s):
"""generate a random dataset:
    Gaussian signal at position pos, of width width, with signal fraction s
on top of a flat background between min and max
"""
# signal sample
data_s = np.random.normal(loc=pos, scale=width, size=int(s * N))
# background sample
data_b = np.random.uniform(low=min, high=max, size=int((1 - s) * N))
return np.concatenate((data_s, data_b))
# the fit functions, one for each version of the distribution with
# different resolution and signal fraction
#
def SplusBmodel1(x, mu=5., width=0.3, res1=0.3, sf1=0.5):
"""pdf of a Gaussian signal at position mu, with natural width width,
resolution res1 and signal fraction sf1 on a flat background
"""
sigma2 = width * width + res1 * res1
normal = np.exp(-0.5 * (x - mu) ** 2 / sigma2) / np.sqrt(2.0 * np.pi * sigma2)
flat = 1. / (max - min)
return sf1 * normal + (1 - sf1) * flat
def SplusBmodel2(x, mu=5., width=0.3, res2=0.3, sf2=0.5):
"""pdf of a Gaussian signal at position mu, with natural width width,
resolution res2 and signal fraction sf2 on a flat background
"""
sigma2 = width * width + res2 * res2
normal = np.exp(-0.5 * (x - mu) ** 2 / sigma2) / np.sqrt(2.0 * np.pi * sigma2)
    flat = 1. / (x_max - x_min)
return sf2 * normal + (1 - sf2) * flat
# --- generate data sets, set up and perform fit
x_min = 0.
x_max = 10.
pos = 6.66
width = 0.33
# -- generate a first data set
s1 = 0.8
N1 = 200
r1 = 2 * width # smearing twice as large as natural width
SplusB_raw1 = generate_data(N1, x_min, x_max, pos, width, s1)
# apply resolution smearing to data set SplusB_data
SplusB_data1 = SplusB_raw1 + np.random.normal(loc=0., scale=r1, size=len(SplusB_raw1))
# -- generate a second data set at the same position and width,
# but with smaller signal fraction, better resolution and more events
s2 = 0.25
N2 = 500
r2 = width / 3.
SplusB_raw2 = generate_data(N2, x_min, x_max, pos, width, s2)
SplusB_data2 = SplusB_raw2 + np.random.normal(loc=0., scale=r2, size=len(SplusB_raw2))
# -- Create histogram containers from the two datasets
SplusB_histogram1 = HistContainer(n_bins=30, bin_range=(x_min, x_max), fill_data=SplusB_data1)
SplusB_histogram2 = HistContainer(n_bins=50, bin_range=(x_min, x_max), fill_data=SplusB_data2)
# -- create Fit objects by specifying their density functions with corresponding parameters
hist_fit1 = Fit(data=SplusB_histogram1, model_function=SplusBmodel1)
hist_fit2 = Fit(data=SplusB_histogram2, model_function=SplusBmodel2)
# to make the fit unambiguous,
# external knowledge on the resolutions must be applied
hist_fit1.add_parameter_constraint(name='res1', value=r1, uncertainty=r1 / 4.)
hist_fit2.add_parameter_constraint(name='res2', value=r2, uncertainty=r2 / 2.)
# -- test: perform individual fits
print('\n*==* Result of fit to first histogram')
hist_fit1.do_fit()
hist_fit1.report()
print('\n*==* Result of fit to second histogram')
hist_fit2.do_fit()
hist_fit2.report()
# combine the two fits to a MultiFit
multi_fit = MultiFit(fit_list=[hist_fit1, hist_fit2])
multi_fit.do_fit() # do the fit
print('\n*==* Result of multi-fit to both histograms')
multi_fit.report() # Optional: print a report to the terminal
# Optional: create output graphics
multi_plot = Plot(multi_fit, separate_figures=True)
multi_plot.plot(asymmetric_parameter_errors=True)
plt.show()
|
dsavoiu/kafe2
|
examples/011_multifit/03_multifit2.py
|
Python
|
gpl-3.0
| 4,626
|
[
"Gaussian"
] |
933501bfecc1ffdd726ff9f4045d699383bf5e5e748b2e8407646f030164d43b
|
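# A quick standalone check (illustrative, not part of the kafe2 example above)
# of the relation both model pdfs rely on: smearing a Gaussian of natural
# width w with a resolution r yields an observed width of sqrt(w**2 + r**2).
import numpy as np

rng = np.random.default_rng(0)
w, r = 0.33, 0.66
smeared = rng.normal(0.0, w, 100000) + rng.normal(0.0, r, 100000)
print(smeared.std(), np.hypot(w, r))  # both approximately 0.74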
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cell',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('cell_name', models.CharField(default=b'Generic', unique=True, max_length=300)),
('cell_type', models.CharField(default=b'Generic', max_length=300, choices=[(b'Muscle', b'Muscle'), (b'Neuron', b'Neuron'), (b'Motor Neuron', b'Motor Neuron'), (b'Xenopus Oocyte', b'Xenopus Oocyte'), (b'Generic', b'Generic')])),
('membrane_capacitance', models.FloatField(max_length=200, null=True, verbose_name=b'Capacitance of the membrane (F)', blank=True)),
('specific_capacitance', models.FloatField(default=0.01, null=True, verbose_name=b'Specific capacitance of the membrane (F/m2)', blank=True)),
('area', models.FloatField(default=6e-09, null=True, verbose_name=b'Total area of the cell (m2)', blank=True)),
],
),
migrations.CreateModel(
name='CellChannel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('channel_density', models.FloatField(null=True, verbose_name=b'Density of the channel in cell (1/m2)', blank=True)),
('cell', models.ForeignKey(to='ion_channel.Cell')),
],
),
migrations.CreateModel(
name='Experiment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('create_date', models.DateTimeField()),
('last_update', models.DateTimeField(auto_now=True)),
('comments', models.TextField(null=True, blank=True)),
],
),
migrations.CreateModel(
name='Graph',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mutants', models.CharField(max_length=300, null=True, verbose_name=b'Additional ion channel mutants (e.g. nf100,n582)', blank=True)),
('x_axis_type', models.CharField(max_length=50, choices=[(b'I', b'Current'), (b'I_ss', b'Steady-state Current'), (b'I_peak', b'Peak Current'), (b'I_norm', b'Normalized Current'), (b'V', b'Voltage'), (b'T', b'Time'), (b'G', b'Conductance'), (b'G/G_max', b'G/G_max'), (b'Po', b'Open Probability'), (b'Ca_concentration', b'Calcium Concentration'), (b'Cl_concentration', b'Chloride Concentration'), (b'Bar', b'Bar Chart')])),
('x_axis_unit', models.CharField(max_length=50, verbose_name=b'Axis unit in the original figure (e.g. ms)')),
('x_axis_toSI', models.FloatField(default=1, verbose_name=b'Multiply by this value to convert to SI (e.g. 1e-3)')),
('y_axis_type', models.CharField(max_length=50, choices=[(b'I', b'Current'), (b'I_ss', b'Steady-state Current'), (b'I_peak', b'Peak Current'), (b'I_norm', b'Normalized Current'), (b'V', b'Voltage'), (b'T', b'Time'), (b'G', b'Conductance'), (b'G/G_max', b'G/G_max'), (b'Po', b'Open Probability'), (b'Ca_concentration', b'Calcium Concentration'), (b'Cl_concentration', b'Chloride Concentration'), (b'Bar', b'Bar Chart')])),
('y_axis_unit', models.CharField(max_length=50, verbose_name=b'Axis unit in the original figure (e.g. mV)')),
('y_axis_toSI', models.FloatField(default=1, verbose_name=b'Multiply by this value to convert to SI (e.g. 1e-3)')),
('figure_ref_address', models.CharField(max_length=50, verbose_name=b'Figure number (e.g. 2A)')),
('figure_ref_caption', models.TextField(verbose_name=b'Figure caption')),
('file', models.ImageField(upload_to=b'ion_channel/graph/%Y/%m/%d')),
('experiment', models.ForeignKey(blank=True, to='ion_channel.Experiment', null=True)),
],
),
migrations.CreateModel(
name='GraphData',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('series_name', models.CharField(max_length=200)),
('series_data', models.TextField()),
('graph', models.ForeignKey(to='ion_channel.Graph')),
],
),
migrations.CreateModel(
name='IonChannel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('channel_name', models.CharField(max_length=300, null=True)),
('description', models.TextField(null=True, blank=True)),
('description_evidences', models.TextField(null=True, verbose_name=b'PMID for description evidence', blank=True)),
('channel_type', models.CharField(blank=True, max_length=300, null=True, choices=[(b'Ca', b'Calcium Channel'), (b'K', b'Potassium Channel')])),
('channel_subtype', models.CharField(max_length=300, null=True, blank=True)),
('ion_type', models.CharField(blank=True, max_length=200, null=True, choices=[(b'Ca', b'Calcium'), (b'K', b'Potassium'), (b'Cl', b'Chloride')])),
('ligand_type', models.CharField(max_length=200, null=True, blank=True)),
('gene_name', models.CharField(max_length=300, null=True, blank=True)),
('gene_WB_ID', models.CharField(max_length=300, null=True, blank=True)),
('gene_class', models.CharField(max_length=300, null=True, blank=True)),
('proteins', models.CharField(max_length=300, null=True, blank=True)),
('protein_sequence', models.TextField(null=True, blank=True)),
('uniprot_ID', models.CharField(max_length=300, null=True, blank=True)),
('pdb_ID', models.CharField(max_length=300, null=True, blank=True)),
('interpro_ID', models.CharField(max_length=300, null=True, blank=True)),
('structure', models.TextField(null=True, blank=True)),
('structure_image', models.ImageField(null=True, upload_to=b'ion_channel/structures/', blank=True)),
('expression_pattern', models.TextField(null=True, blank=True)),
('expression_evidences', models.TextField(null=True, verbose_name=b'PMID for expression evidence', blank=True)),
('last_update', models.DateTimeField(auto_now=True, null=True)),
],
),
migrations.CreateModel(
name='IonChannelModel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('model_type', models.CharField(default=b'HH', max_length=300, choices=[(b'HH', b'Hodgkin-Huxley'), (b'Markov', b'Markov')])),
('modeling_type', models.CharField(default=b'Experimental', max_length=300, choices=[(b'Experimental', b'Experimental'), (b'Estimated', b'Estimated')])),
('date', models.DateTimeField(auto_now=True)),
('score', models.FloatField(default=None, null=True, verbose_name=b'Evaluated Score', blank=True)),
('neuroML_file', models.FilePathField(null=True, blank=True)),
('channel_name', models.ForeignKey(to='ion_channel.IonChannel')),
('experiment', models.ForeignKey(to='ion_channel.Experiment')),
('graph', models.ForeignKey(to='ion_channel.Graph')),
],
),
migrations.CreateModel(
name='KeyVal',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('key', models.CharField(max_length=240, db_index=True)),
('value', models.CharField(max_length=240, db_index=True)),
],
),
migrations.CreateModel(
name='ParamDict',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='PatchClamp',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('type', models.CharField(max_length=200, choices=[(b'VClamp', b'Voltage-Clamp'), (b'IClamp', b'Current-Clamp')])),
('patch_type', models.CharField(max_length=200, choices=[(b'Whole-cell', b'Whole-cell'), (b'Single-channel', b'Single-channel')])),
('duration', models.FloatField(verbose_name=b'Patch-Clamp Duration (ms)')),
('deltat', models.FloatField(default=0.01, verbose_name=b'Time interval-Deltat (ms)')),
('start_time', models.FloatField(default=0, verbose_name=b'Start time (ms)')),
('end_time', models.FloatField(verbose_name=b'End time (ms) (default=duration)')),
('protocol_start', models.FloatField(verbose_name=b'Initial holding potential or stimulated current (mV or pA)')),
('protocol_end', models.FloatField(verbose_name=b'End of Holding potential or stimulated current (mV or pA)')),
('protocol_step', models.FloatField(verbose_name=b'Steps of Holding potential or stimulated current (mV or pA)')),
('cell_age', models.FloatField(default=None, null=True, verbose_name=b'Age of the cell (days)', blank=True)),
('membrane_capacitance', models.FloatField(max_length=200, null=True, verbose_name=b'Capacitance of the membrane (F)', blank=True)),
('temperature', models.FloatField(default=21, null=True, verbose_name=b'Temperature (Celsius)', blank=True)),
('initial_voltage', models.FloatField(null=True, verbose_name=b'Initial holding potential (mV)', blank=True)),
('Ca_concentration', models.FloatField(default=None, null=True, verbose_name=b'Initial molar concentration of Calcium (uM)', blank=True)),
('Cl_concentration', models.FloatField(default=None, null=True, verbose_name=b'Initial molar concentration of Chloride (mM)', blank=True)),
('mutants', models.CharField(max_length=300, null=True, verbose_name=b'Additional ion channel mutants (e.g. nf100,n582,...)', blank=True)),
('blockers', models.CharField(max_length=300, null=True, verbose_name=b'Ion channel blockers (e.g. 500e-6 Cd2+,...)', blank=True)),
('extra_solution', models.TextField(null=True, verbose_name=b'Extracellular Solution (e.g. 140e-3 NaCl, 5e-3 KCl,...)', blank=True)),
('pipette_solution', models.TextField(null=True, verbose_name=b'Pipette Solution (e.g. 120e-3 KCl, 20e-3 KOH,...)', blank=True)),
('cell', models.ForeignKey(verbose_name=b'Type of the cell (e.g. muscle, ADAL, Xenopus Oocyte)', blank=True, to='ion_channel.Cell', null=True)),
('experiment', models.ForeignKey(to='ion_channel.Experiment')),
('ion_channel', models.ForeignKey(to='ion_channel.IonChannel')),
],
),
migrations.CreateModel(
name='Reference',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('doi', models.CharField(unique=True, max_length=300)),
('PMID', models.CharField(max_length=300, null=True, blank=True)),
('title', models.TextField(null=True, blank=True)),
('citation', models.TextField(null=True, blank=True)),
('year', models.CharField(max_length=300, null=True, blank=True)),
('authors', models.CharField(max_length=300, null=True, blank=True)),
('journal', models.CharField(max_length=300, null=True, blank=True)),
('volume', models.CharField(max_length=300, null=True, blank=True)),
('issue', models.CharField(max_length=300, null=True, blank=True)),
('pages', models.CharField(max_length=300, null=True, blank=True)),
('url', models.URLField(null=True, blank=True)),
('create_date', models.DateTimeField(auto_now=True)),
('subject', models.CharField(max_length=300, choices=[(b'Genomics', b'Genomics'), (b'Proteomics', b'Proteomics'), (b'Electrophysiology', b'Electrophysiology'), (b'Other', b'Other')])),
('file_url', models.URLField(null=True, blank=True)),
('cells', models.ManyToManyField(to='ion_channel.Cell', blank=True)),
('ion_channels', models.ManyToManyField(to='ion_channel.IonChannel', blank=True)),
                ('username', models.ForeignKey(verbose_name=b'Contributor', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='keyval',
name='container',
field=models.ForeignKey(to='ion_channel.ParamDict'),
),
migrations.AddField(
model_name='ionchannelmodel',
name='parameters',
field=models.ForeignKey(blank=True, to='ion_channel.ParamDict', null=True),
),
migrations.AddField(
model_name='ionchannelmodel',
name='references',
field=models.ManyToManyField(to='ion_channel.Reference'),
),
migrations.AddField(
model_name='ionchannelmodel',
name='username',
            field=models.ForeignKey(verbose_name=b'Contributor', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='graph',
name='ion_channel',
field=models.ManyToManyField(to='ion_channel.IonChannel'),
),
migrations.AddField(
model_name='graph',
name='patch_clamp',
field=models.ForeignKey(blank=True, to='ion_channel.PatchClamp', null=True),
),
migrations.AddField(
model_name='experiment',
name='reference',
field=models.ForeignKey(to='ion_channel.Reference'),
),
migrations.AddField(
model_name='experiment',
name='username',
            field=models.ForeignKey(verbose_name=b'Contributor', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='cellchannel',
name='ion_channel',
field=models.ForeignKey(to='ion_channel.IonChannel'),
),
migrations.AddField(
model_name='cellchannel',
name='reference',
field=models.ForeignKey(to='ion_channel.Reference'),
),
migrations.AddField(
model_name='cell',
name='ion_channels',
field=models.ManyToManyField(to='ion_channel.IonChannel', blank=True),
),
]
|
cheelee/ChannelWorm
|
channelworm/ion_channel/migrations/0001_initial.py
|
Python
|
mit
| 15,404
|
[
"NEURON"
] |
342cb0f43d8b04911ee25eea2597f1c7833ed9eefeba19fc817ab08ecee056cf
|
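# Illustrative ORM usage of the models this migration creates (a sketch; it
# assumes the ion_channel app is installed, the migration has been applied,
# and the models live in ion_channel.models).
from ion_channel.models import Cell, IonChannel

cell = Cell.objects.create(cell_name='ADAL', cell_type='Neuron',
                           specific_capacitance=0.01, area=6e-09)
for channel in IonChannel.objects.filter(ion_type='K'):
    print channel.channel_name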
###############################################################################
# Copyright 2016 - Climate Research Division
# Environment and Climate Change Canada
#
# This file is part of the "EC-CAS diags" package.
#
# "EC-CAS diags" is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# "EC-CAS diags" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with "EC-CAS diags". If not, see <http://www.gnu.org/licenses/>.
###############################################################################
# Interface for reading / writing GEOS-CHEM data that is converted to the
# netCDF COARDS convention (compatible with GAMAP routine BPCH2COARDS).
from . import DataProduct
class GEOSCHEM_Data(DataProduct):
"""
GEOS-Chem data (converted to netCDF using the BPCH2COARDS utility).
"""
# A and B interface values (for vertical coordinate)
A_interface = dict()
B_interface = dict()
A_interface[47] = [
0.000000E+00, 4.804826E-02, 6.593752E+00, 1.313480E+01,
1.961311E+01, 2.609201E+01, 3.257081E+01, 3.898201E+01,
4.533901E+01, 5.169611E+01, 5.805321E+01, 6.436264E+01,
7.062198E+01, 7.883422E+01, 8.909992E+01, 9.936521E+01,
1.091817E+02, 1.189586E+02, 1.286959E+02, 1.429100E+02,
1.562600E+02, 1.696090E+02, 1.816190E+02, 1.930970E+02,
2.032590E+02, 2.121500E+02, 2.187760E+02, 2.238980E+02,
2.243630E+02, 2.168650E+02, 2.011920E+02, 1.769300E+02,
1.503930E+02, 1.278370E+02, 1.086630E+02, 9.236572E+01,
7.851231E+01, 5.638791E+01, 4.017541E+01, 2.836781E+01,
1.979160E+01, 9.292942E+00, 4.076571E+00, 1.650790E+00,
6.167791E-01, 2.113490E-01, 6.600001E-02, 1.000000E-02
]
B_interface[47] = [
1.000000E+00, 9.849520E-01, 9.634060E-01, 9.418650E-01,
9.203870E-01, 8.989080E-01, 8.774290E-01, 8.560180E-01,
8.346609E-01, 8.133039E-01, 7.919469E-01, 7.706375E-01,
7.493782E-01, 7.211660E-01, 6.858999E-01, 6.506349E-01,
6.158184E-01, 5.810415E-01, 5.463042E-01, 4.945902E-01,
4.437402E-01, 3.928911E-01, 3.433811E-01, 2.944031E-01,
2.467411E-01, 2.003501E-01, 1.562241E-01, 1.136021E-01,
6.372006E-02, 2.801004E-02, 6.960025E-03, 8.175413E-09,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00
]
A_interface[72] = [
0.000000E+00, 4.804826E-02, 6.593752E+00, 1.313480E+01,
1.961311E+01, 2.609201E+01, 3.257081E+01, 3.898201E+01,
4.533901E+01, 5.169611E+01, 5.805321E+01, 6.436264E+01,
7.062198E+01, 7.883422E+01, 8.909992E+01, 9.936521E+01,
1.091817E+02, 1.189586E+02, 1.286959E+02, 1.429100E+02,
1.562600E+02, 1.696090E+02, 1.816190E+02, 1.930970E+02,
2.032590E+02, 2.121500E+02, 2.187760E+02, 2.238980E+02,
2.243630E+02, 2.168650E+02, 2.011920E+02, 1.769300E+02,
1.503930E+02, 1.278370E+02, 1.086630E+02, 9.236572E+01,
7.851231E+01, 6.660341E+01, 5.638791E+01, 4.764391E+01,
4.017541E+01, 3.381001E+01, 2.836781E+01, 2.373041E+01,
1.979160E+01, 1.645710E+01, 1.364340E+01, 1.127690E+01,
9.292942E+00, 7.619842E+00, 6.216801E+00, 5.046801E+00,
4.076571E+00, 3.276431E+00, 2.620211E+00, 2.084970E+00,
1.650790E+00, 1.300510E+00, 1.019440E+00, 7.951341E-01,
6.167791E-01, 4.758061E-01, 3.650411E-01, 2.785261E-01,
2.113490E-01, 1.594950E-01, 1.197030E-01, 8.934502E-02,
6.600001E-02, 4.758501E-02, 3.270000E-02, 2.000000E-02,
1.000000E-02
]
B_interface[72] = [
1.000000E+00, 9.849520E-01, 9.634060E-01, 9.418650E-01,
9.203870E-01, 8.989080E-01, 8.774290E-01, 8.560180E-01,
8.346609E-01, 8.133039E-01, 7.919469E-01, 7.706375E-01,
7.493782E-01, 7.211660E-01, 6.858999E-01, 6.506349E-01,
6.158184E-01, 5.810415E-01, 5.463042E-01, 4.945902E-01,
4.437402E-01, 3.928911E-01, 3.433811E-01, 2.944031E-01,
2.467411E-01, 2.003501E-01, 1.562241E-01, 1.136021E-01,
6.372006E-02, 2.801004E-02, 6.960025E-03, 8.175413E-09,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00, 0.000000E+00, 0.000000E+00, 0.000000E+00,
0.000000E+00
]
# Method to open a single file
@staticmethod
def open_file (filename):
from pygeode.formats import netcdf
from pygeode.dataset import Dataset
dataset = netcdf.open(filename)
# Hack for the grid cell areas - remove time axis.
dataset = list(dataset)
for i, var in enumerate(dataset):
if var.name.startswith('DXYP'):
var = var.squeeze('time')
dataset[i] = var
# Make sure the longitudes are monotonic!
dataset = [d.sorted('lon') for d in dataset]
return Dataset(dataset)
# Method to decode an opened dataset (standardize variable names, and add any
# extra info needed (pressure values, cell area, etc.)
@classmethod
def decode (cls, dataset):
import numpy as np
from pygeode.axis import Hybrid
from pygeode.var import Var
from pygeode.dataset import asdataset
# Use some 'standardized' names, and locate a z-axis.
zaxis = None
dataset = list(dataset)
for i, var in enumerate(dataset):
if var.name.endswith('_CO2'):
var.name = 'CO2'
if var.atts['units'] == "v/v": # From restart file?
var.atts['units'] = "mol mol(dry_air)-1"
else: # From experiment output?
var.atts['units'] = '1E-9 mol mol(dry_air)-1'
if var.name.endswith('_CO'):
var.name = 'CO'
if var.atts['units'] == "v/v": # From restart file?
var.atts['units'] = "mol mol(dry_air)-1"
else: # From experiment output?
var.atts['units'] = '1E-9 mol mol(dry_air)-1'
if var.name == 'CO__SRCE__COanth':
var.name = 'CO_anthropogenic_flux'
var.atts['specie'] = 'CO'
var.atts['units'] = 'molecules cm-2 s-1'
if var.name == 'PORL_L_S__PCH4':
var.name = 'CO_production'
var.atts['units'] = 'molecules cm-3 s-1'
# Treat ppbv units as ppb
if var.atts.get('units',None) == 'ppbv':
var.atts['units'] = 'ppb'
if var.name == 'PSURF' or var.name.endswith('_PSURF') or var.name.endswith('_PS') or var.name.startswith('PEDGE_S'):
# Special case: actually have 3D pressure (erroneously encoded?)
if var.hasaxis('lev'):
# Exception: data is not filled in
# (e.g. GEOS-Chem_CO_CH4_source_2010.nc)
if var[0,-1,0,0] == 0: continue
# Note: this seems to be on interfaces (last level is the surface).
# only keep last level, since we can generate pressure on mid-levels
# from the formula.
var = var.slice[:,0,:,:].squeeze('lev')
var.name = 'surface_pressure'
var.atts['units'] = 'hPa'
if var.name.startswith('GMAO_'):
var.atts['units'] = 'hPa'
var.name = 'surface_pressure'
if var.name.endswith('_QV'):
var.name = 'specific_humidity'
var.atts['units'] = 'kg(H2O) kg(air)-1'
if var.name.endswith('_SPHU'):
var.name = 'specific_humidity'
var.atts['units'] = 'g(H2O) kg(air)-1'
if var.name.startswith('DXYP'):
var.name = 'cell_area'
# Special case: vertical levels that we know the parameters for
if var.hasaxis('lev'):
zaxis = var.getaxis('lev')
dataset[i] = var
# Generate the expected vertical axis
if zaxis is not None:
nlev = len(zaxis)
A_interface = np.array(cls.A_interface[nlev])
B_interface = np.array(cls.B_interface[nlev])
A = (A_interface[:-1] + A_interface[1:]) * 0.5
B = (B_interface[:-1] + B_interface[1:]) * 0.5
dA = (A_interface[:-1] - A_interface[1:])
dB = (B_interface[:-1] - B_interface[1:])
zaxis = Hybrid(zaxis.values, A=A, B=B)
for i, var in enumerate(dataset):
if var.hasaxis('lev'):
dataset[i] = var.replace_axes(lev=zaxis)
# Convert to a dictionary (for referencing by variable name)
data = dict((var.name,var) for var in dataset)
# Compute a pressure field.
# Also, compute a dp field (vertical change in pressure within a gridbox).
if 'surface_pressure' in data and zaxis is not None:
Ps = data['surface_pressure']
A = zaxis.auxasvar('A')
B = zaxis.auxasvar('B')
P = A + B*Ps
P = P.transpose('time','zaxis','lat','lon')
P.atts['units'] = 'mbar'
data['air_pressure'] = P
dA = Var([zaxis], values=dA)
dB = Var([zaxis], values=dB)
dP = dA + dB*Ps
dP = dP.transpose('time','zaxis','lat','lon')
dP.atts['units'] = 'mbar'
data['dp'] = dP
# Grid cell areas
# Pick some arbitrary (but deterministic) variable to get the lat/lon
# if 'cell_area' not in data:
# var = sorted(data.values())[0]
# from ..common import get_area
# data['cell_area'] = get_area(var.lat,var.lon)
# General cleanup stuff
# Make sure the variables have the appropriate names
for name, var in data.iteritems(): var.name = name
# Add extra fields that will be useful for the diagnostics.
data = cls._add_extra_fields(data)
return data
# Method to find all files in the given directory, which can be accessed
# through this interface.
@staticmethod
def find_files (dirname):
from glob import glob
files = []
for filename in "GC_restart.20100101_G5_4x5_COv10_47L.nc", "GEOS-Chem_CO_combust_VOC_emiss_2010.nc", "GEOS-Chem_CO_CH4_source_2010.nc", "GEOS-Chem_CO_loss_freq_2010.nc", "ts*.nc":
files.extend(glob(dirname+"/"+filename))
return files
# Method to find a unique identifying string for this dataset, from the
# given directory name.
@staticmethod
def get_dataname (dirname):
import os
dirs = dirname.split(os.sep)
if dirs[-1] == 'timeseries':
dirs = dirs[:-1]
return dirs[-1]
# Add this interface to the table.
from . import table
table['geoschem-coards'] = GEOSCHEM_Data
|
neishm/EC-CAS-diags
|
eccas_diags/interfaces/geoschem_coards.py
|
Python
|
lgpl-3.0
| 10,919
|
[
"NetCDF"
] |
54c2199fad05c8a4f2f9963de57ff8e0a6706b6091d6dd209e64c88f233367b3
|
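# A standalone numpy sketch of the hybrid-coordinate arithmetic in decode():
# mid-level pressure p = A + B * Ps, with A and B averaged from the interface
# values. The four interface values below are just the first entries of the
# 47-level tables above; Ps is an illustrative surface pressure in hPa.
import numpy as np

A_int = np.array([0.000000E+00, 4.804826E-02, 6.593752E+00, 1.313480E+01])
B_int = np.array([1.000000E+00, 9.849520E-01, 9.634060E-01, 9.418650E-01])
A = 0.5 * (A_int[:-1] + A_int[1:])
B = 0.5 * (B_int[:-1] + B_int[1:])
Ps = 1013.25
p_mid = A + B * Ps                                       # hPa, surface-most level first
dp = (A_int[:-1] - A_int[1:]) + (B_int[:-1] - B_int[1:]) * Ps
print(p_mid)
print(dp)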
"""
pysteps.blending.clim
=====================
Module with methods to read, write and compute past and climatological NWP model
skill scores. The module stores the average daily skill score for the past t days
and updates it every day. The resulting average climatological skill score is
the skill the NWP model skill regresses to during the blended forecast. If no
climatological values are present, the default skill from :cite:`BPS2006` is used.
.. autosummary::
:toctree: ../generated/
get_default_skill
save_skill
calc_clim_skill
"""
import pickle
from pathlib import Path
import numpy as np
def get_default_skill(n_cascade_levels=8, n_models=1):
"""
Get the default climatological skill values as given in :cite:`BPS2006`.
Take subset of n_cascade_levels or add entries with small values (1e-4) if
n_cascade_levels differs from 8.
Parameters
----------
n_cascade_levels: int, optional
Number of cascade levels. Defaults to 8.
n_models: int, optional
Number of NWP models. Defaults to 1.
Returns
-------
default_skill: array-like
Array of shape [model, scale_level] containing the climatological skill
values.
"""
default_skill = np.array(
[0.848, 0.537, 0.237, 0.065, 0.020, 0.0044, 0.0052, 0.0040]
)
n_skill = default_skill.shape[0]
if n_cascade_levels < n_skill:
default_skill = default_skill[0:n_cascade_levels]
elif n_cascade_levels > n_skill:
default_skill = np.append(
default_skill, np.repeat(1e-4, n_cascade_levels - n_skill)
)
return np.resize(default_skill, (n_models, n_cascade_levels))
def save_skill(
current_skill,
validtime,
outdir_path,
n_models=1,
window_length=30,
):
"""
Add the current NWP skill to update today's daily average skill. If the day
is over, update the list of daily average skill covering a rolling window.
Parameters
----------
current_skill: array-like
Array of shape [model, scale_level, ...]
containing the current skill of the different NWP models per cascade
level.
validtime: datetime
Datetime object containing the date and time for which the current
        skill is valid.
outdir_path: string
        Path to folder where the historical skill scores are stored. Defaults to
path_workdir from rcparams.
n_models: int, optional
Number of NWP models. Defaults to 1.
window_length: int, optional
Length of window (in days) of daily skill that should be retained.
Defaults to 30.
Returns
-------
None
"""
n_cascade_levels = current_skill.shape[1]
# Load skill_today, a dictionary containing {mean_skill, n, last_validtime}
new_skill_today_file = False
skill_today_file = Path(outdir_path) / "NWP_skill_today.pkl"
if skill_today_file.is_file():
with open(skill_today_file, "rb") as f:
skill_today = pickle.load(f)
if skill_today["mean_skill"].shape != current_skill.shape:
new_skill_today_file = True
else:
new_skill_today_file = True
if new_skill_today_file:
skill_today = {
"mean_skill": np.copy(current_skill),
"n": 0,
"last_validtime": validtime,
}
# Load the past skill which is an array with dimensions day x model x scale_level
past_skill_file = Path(outdir_path) / "NWP_skill_window.npy"
past_skill = None
if past_skill_file.is_file():
past_skill = np.load(past_skill_file)
# First check if we have started a new day wrt the last written skill, in which
# case we should update the daily skill file and reset daily statistics.
if skill_today["last_validtime"].date() < validtime.date():
# Append skill to the list of the past X daily averages.
if (
past_skill is not None
and past_skill.shape[2] == n_cascade_levels
and past_skill.shape[1] == n_models
):
past_skill = np.append(past_skill, [skill_today["mean_skill"]], axis=0)
else:
past_skill = np.array([skill_today["mean_skill"]])
# Remove oldest if the number of entries exceeds the window length.
if past_skill.shape[0] > window_length:
past_skill = np.delete(past_skill, 0, axis=0)
# FIXME also write out last_validtime.date() in this file?
# In that case it will need pickling or netcdf.
# Write out the past skill within the rolling window.
np.save(past_skill_file, past_skill)
# Reset statistics for today.
skill_today["n"] = 0
skill_today["mean_skill"] = 0
# Reset today's skill if needed and/or compute online average from the
# current skill using numerically stable algorithm
skill_today["n"] += 1
skill_today["mean_skill"] += (
current_skill - skill_today["mean_skill"]
) / skill_today["n"]
skill_today["last_validtime"] = validtime
# Make path if path does not exist
skill_today_file.parent.mkdir(exist_ok=True, parents=True)
# Open and write to skill file
with open(skill_today_file, "wb") as f:
pickle.dump(skill_today, f)
return None
def calc_clim_skill(
outdir_path,
n_cascade_levels=8,
n_models=1,
window_length=30,
):
"""
Return the climatological skill based on the daily average skill in the
rolling window. This is done using a geometric mean.
Parameters
----------
    outdir_path: string
        Path to folder where the historical skill scores are stored. Defaults to
        path_workdir from rcparams.
    n_cascade_levels: int, optional
        Number of cascade levels. Defaults to 8.
n_models: int, optional
Number of NWP models. Defaults to 1.
window_length: int, optional
Length of window (in days) over which to compute the climatological
skill. Defaults to 30.
Returns
-------
climatological_mean_skill: array-like
Array of shape [model, scale_level, ...] containing the climatological
(geometric) mean skill.
"""
past_skill_file = Path(outdir_path) / "NWP_skill_window.npy"
# past_skill has dimensions date x model x scale_level x ....
if past_skill_file.is_file():
past_skill = np.load(past_skill_file)
else:
past_skill = np.array(None)
# check if there is enough data to compute the climatological skill
if not past_skill.any():
return get_default_skill(n_cascade_levels, n_models)
elif past_skill.shape[0] < window_length:
return get_default_skill(n_cascade_levels, n_models)
# reduce window if necessary
else:
past_skill = past_skill[-window_length:]
# Make sure past_skill cannot be lower than 10e-5
past_skill = np.where(past_skill < 10e-5, 10e-5, past_skill)
# Calculate climatological skill from the past_skill using the
# geometric mean.
geomean_skill = np.exp(np.log(past_skill).mean(axis=0))
# Make sure skill is always a positive value and a finite value
geomean_skill = np.where(geomean_skill < 10e-5, 10e-5, geomean_skill)
geomean_skill = np.nan_to_num(
geomean_skill, copy=True, nan=10e-5, posinf=10e-5, neginf=10e-5
)
return geomean_skill
|
pySTEPS/pysteps
|
pysteps/blending/clim.py
|
Python
|
bsd-3-clause
| 7,285
|
[
"NetCDF"
] |
dc4b3efae363680b15e1e94460ddd104a2d3e3bfe1198e68b498f69a89a6ee14
|
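# The daily mean in save_skill() uses the standard online (incremental) mean
# update, mean += (x - mean) / n; a minimal standalone illustration:
import numpy as np

samples = np.array([0.8, 0.6, 0.7, 0.9])
mean, n = 0.0, 0
for x in samples:
    n += 1
    mean += (x - mean) / n
assert np.isclose(mean, samples.mean())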
# -*- coding: utf-8 -*-
import inspect
import sys
from django.utils.translation import ugettext_lazy as _lazy
from mpconstants import countries
from mkt.constants import ratingsbodies
from mkt.constants.ratingsbodies import slugify_iarc_name
class REGION(object):
"""
A region is like a country but more confusing.
id::
The primary key used to identify a region in the DB.
name::
The text that appears in the header and region selector menu.
slug::
The text that gets stored in the cookie or in ?region=<slug>.
Use the ISO-3166 code please.
mcc::
Don't know what an ITU MCC is? They're useful for carrier billing.
Read http://en.wikipedia.org/wiki/List_of_mobile_country_codes
adolescent::
With a mature region (meaning, it has a volume of useful data) we
are able to calculate ratings and rankings independently. If a
store is immature it will continue using the global popularity
measure. If a store is mature it will use the smaller, more
relevant set of data.
weight::
Determines sort order (after slug).
special::
Does this region need to be reviewed separately? That region is
special.
low_memory::
Does this region have low-memory (Tarako) devices?
"""
id = None
name = slug = ''
adolescent = True
mcc = None
weight = 0
ratingsbody = None
special = False
low_memory = False
class RESTOFWORLD(REGION):
id = 1
name = _lazy(u'Rest of World')
slug = 'restofworld'
weight = -1
# These keys are from marketplace-constants
# See https://mana.mozilla.org/wiki/display/MARKET/How+to+add+a+new+region
lookup = {
'ABW': _lazy(u'Aruba'),
'AFG': _lazy(u'Afghanistan'),
'AGO': _lazy(u'Angola'),
'AIA': _lazy(u'Anguilla'),
'ALA': _lazy(u'Åland Islands'),
'ALB': _lazy(u'Albania'),
'AND': _lazy(u'Andorra'),
'ARE': _lazy(u'United Arab Emirates'),
'ARG': _lazy(u'Argentina'),
'ARM': _lazy(u'Armenia'),
'ASM': _lazy(u'American Samoa'),
'ATA': _lazy(u'Antarctica'),
'ATF': _lazy(u'French Southern Territories'),
'ATG': _lazy(u'Antigua and Barbuda'),
'AUS': _lazy(u'Australia'),
'AUT': _lazy(u'Austria'),
'AZE': _lazy(u'Azerbaijan'),
'BDI': _lazy(u'Burundi'),
'BEL': _lazy(u'Belgium'),
'BEN': _lazy(u'Benin'),
'BES': _lazy(u'Bonaire, Sint Eustatius and Saba'),
'BFA': _lazy(u'Burkina Faso'),
'BGD': _lazy(u'Bangladesh'),
'BGR': _lazy(u'Bulgaria'),
'BHR': _lazy(u'Bahrain'),
'BHS': _lazy(u'Bahamas'),
'BIH': _lazy(u'Bosnia and Herzegovina'),
'BLM': _lazy(u'Saint Barthélemy'),
'BLR': _lazy(u'Belarus'),
'BLZ': _lazy(u'Belize'),
'BMU': _lazy(u'Bermuda'),
'BOL': _lazy(u'Bolivia, Plurinational State of'),
'BRA': _lazy(u'Brazil'),
'BRB': _lazy(u'Barbados'),
'BRN': _lazy(u'Brunei Darussalam'),
'BTN': _lazy(u'Bhutan'),
'BVT': _lazy(u'Bouvet Island'),
'BWA': _lazy(u'Botswana'),
'CAF': _lazy(u'Central African Republic'),
'CAN': _lazy(u'Canada'),
'CCK': _lazy(u'Cocos (Keeling) Islands'),
'CHE': _lazy(u'Switzerland'),
'CHL': _lazy(u'Chile'),
'CHN': _lazy(u'China'),
'CIV': _lazy(u"Côte d'Ivoire"),
'CMR': _lazy(u'Cameroon'),
'COD': _lazy(u'Congo, Democratic Republic of the'),
'COG': _lazy(u'Congo'),
'COK': _lazy(u'Cook Islands'),
'COL': _lazy(u'Colombia'),
'COM': _lazy(u'Comoros'),
'CPV': _lazy(u'Cabo Verde'),
'CRI': _lazy(u'Costa Rica'),
'CUB': _lazy(u'Cuba'),
'CUW': _lazy(u'Curaçao'),
'CXR': _lazy(u'Christmas Island'),
'CYM': _lazy(u'Cayman Islands'),
'CYP': _lazy(u'Cyprus'),
'CZE': _lazy(u'Czech Republic'),
'DEU': _lazy(u'Germany'),
'DJI': _lazy(u'Djibouti'),
'DMA': _lazy(u'Dominica'),
'DNK': _lazy(u'Denmark'),
'DOM': _lazy(u'Dominican Republic'),
'DZA': _lazy(u'Algeria'),
'ECU': _lazy(u'Ecuador'),
'EGY': _lazy(u'Egypt'),
'ERI': _lazy(u'Eritrea'),
'ESH': _lazy(u'Western Sahara'),
'ESP': _lazy(u'Spain'),
'EST': _lazy(u'Estonia'),
'ETH': _lazy(u'Ethiopia'),
'FIN': _lazy(u'Finland'),
'FJI': _lazy(u'Fiji'),
'FLK': _lazy(u'Falkland Islands (Malvinas)'),
'FRA': _lazy(u'France'),
'FRO': _lazy(u'Faroe Islands'),
'FSM': _lazy(u'Micronesia, Federated States of'),
'GAB': _lazy(u'Gabon'),
'GBR': _lazy(u'United Kingdom'),
'GEO': _lazy(u'Georgia'),
'GGY': _lazy(u'Guernsey'),
'GHA': _lazy(u'Ghana'),
'GIB': _lazy(u'Gibraltar'),
'GIN': _lazy(u'Guinea-Conakry'),
'GLP': _lazy(u'Guadeloupe'),
'GMB': _lazy(u'Gambia'),
'GNB': _lazy(u'Guinea-Bissau'),
'GNQ': _lazy(u'Equatorial Guinea'),
'GRC': _lazy(u'Greece'),
'GRD': _lazy(u'Grenada'),
'GRL': _lazy(u'Greenland'),
'GTM': _lazy(u'Guatemala'),
'GUF': _lazy(u'French Guiana'),
'GUM': _lazy(u'Guam'),
'GUY': _lazy(u'Guyana'),
'HKG': _lazy(u'Hong Kong'),
'HMD': _lazy(u'Heard Island and McDonald Islands'),
'HND': _lazy(u'Honduras'),
'HRV': _lazy(u'Croatia'),
'HTI': _lazy(u'Haiti'),
'HUN': _lazy(u'Hungary'),
'IDN': _lazy(u'Indonesia'),
'IMN': _lazy(u'Isle of Man'),
'IND': _lazy(u'India'),
'IOT': _lazy(u'British Indian Ocean Territory'),
'IRL': _lazy(u'Ireland'),
'IRQ': _lazy(u'Iraq'),
'ISL': _lazy(u'Iceland'),
'ISR': _lazy(u'Israel'),
'ITA': _lazy(u'Italy'),
'JAM': _lazy(u'Jamaica'),
'JEY': _lazy(u'Jersey'),
'JOR': _lazy(u'Jordan'),
'JPN': _lazy(u'Japan'),
'KAZ': _lazy(u'Kazakhstan'),
'KEN': _lazy(u'Kenya'),
'KGZ': _lazy(u'Kyrgyzstan'),
'KHM': _lazy(u'Cambodia'),
'KIR': _lazy(u'Kiribati'),
'KNA': _lazy(u'Saint Kitts and Nevis'),
'KOR': _lazy(u'Korea, Republic of'),
'KWT': _lazy(u'Kuwait'),
'LAO': _lazy(u"Lao People's Democratic Republic"),
'LBN': _lazy(u'Lebanon'),
'LBR': _lazy(u'Liberia'),
'LBY': _lazy(u'Libya'),
'LCA': _lazy(u'Saint Lucia'),
'LIE': _lazy(u'Liechtenstein'),
'LKA': _lazy(u'Sri Lanka'),
'LSO': _lazy(u'Lesotho'),
'LTU': _lazy(u'Lithuania'),
'LUX': _lazy(u'Luxembourg'),
'LVA': _lazy(u'Latvia'),
'MAC': _lazy(u'Macao'),
'MAF': _lazy(u'Saint Martin (French part)'),
'MAR': _lazy(u'Morocco'),
'MCO': _lazy(u'Monaco'),
'MDA': _lazy(u'Moldova, Republic of'),
'MDG': _lazy(u'Madagascar'),
'MDV': _lazy(u'Maldives'),
'MEX': _lazy(u'Mexico'),
'MHL': _lazy(u'Marshall Islands'),
'MKD': _lazy(u'Macedonia, the former Yugoslav Republic of'),
'MLI': _lazy(u'Mali'),
'MLT': _lazy(u'Malta'),
'MMR': _lazy(u'Myanmar'),
'MNE': _lazy(u'Montenegro'),
'MNG': _lazy(u'Mongolia'),
'MNP': _lazy(u'Northern Mariana Islands'),
'MOZ': _lazy(u'Mozambique'),
'MRT': _lazy(u'Mauritania'),
'MSR': _lazy(u'Montserrat'),
'MTQ': _lazy(u'Martinique'),
'MUS': _lazy(u'Mauritius'),
'MWI': _lazy(u'Malawi'),
'MYS': _lazy(u'Malaysia'),
'MYT': _lazy(u'Mayotte'),
'NAM': _lazy(u'Namibia'),
'NCL': _lazy(u'New Caledonia'),
'NER': _lazy(u'Niger'),
'NFK': _lazy(u'Norfolk Island'),
'NGA': _lazy(u'Nigeria'),
'NIC': _lazy(u'Nicaragua'),
'NIU': _lazy(u'Niue'),
'NLD': _lazy(u'Netherlands'),
'NOR': _lazy(u'Norway'),
'NPL': _lazy(u'Nepal'),
'NRU': _lazy(u'Nauru'),
'NZL': _lazy(u'New Zealand'),
'OMN': _lazy(u'Oman'),
'PAK': _lazy(u'Pakistan'),
'PAN': _lazy(u'Panama'),
'PCN': _lazy(u'Pitcairn'),
'PER': _lazy(u'Peru'),
'PHL': _lazy(u'Philippines'),
'PLW': _lazy(u'Palau'),
'PNG': _lazy(u'Papua New Guinea'),
'POL': _lazy(u'Poland'),
'PRI': _lazy(u'Puerto Rico'),
'PRT': _lazy(u'Portugal'),
'PRY': _lazy(u'Paraguay'),
'PSE': _lazy(u'Palestine, State of'),
'PYF': _lazy(u'French Polynesia'),
'QAT': _lazy(u'Qatar'),
'REU': _lazy(u'Réunion'),
'ROU': _lazy(u'Romania'),
'RUS': _lazy(u'Russia'),
'RWA': _lazy(u'Rwanda'),
'SAU': _lazy(u'Saudi Arabia'),
'SDN': _lazy(u'Sudan'),
'SEN': _lazy(u'Senegal'),
'SGP': _lazy(u'Singapore'),
'SGS': _lazy(u'South Georgia and the South Sandwich Islands'),
'SHN': _lazy(u'Saint Helena, Ascension and Tristan da Cunha'),
'SJM': _lazy(u'Svalbard and Jan Mayen'),
'SLB': _lazy(u'Solomon Islands'),
'SLE': _lazy(u'Sierra Leone'),
'SLV': _lazy(u'El Salvador'),
'SMR': _lazy(u'San Marino'),
'SOM': _lazy(u'Somalia'),
'SPM': _lazy(u'Saint Pierre and Miquelon'),
'SRB': _lazy(u'Serbia'),
'SSD': _lazy(u'South Sudan'),
'STP': _lazy(u'Sao Tome and Principe'),
'SUR': _lazy(u'Suriname'),
'SVK': _lazy(u'Slovakia'),
'SVN': _lazy(u'Slovenia'),
'SWE': _lazy(u'Sweden'),
'SWZ': _lazy(u'Swaziland'),
'SXM': _lazy(u'Sint Maarten (Dutch part)'),
'SYC': _lazy(u'Seychelles'),
'SYR': _lazy(u'Syrian Arab Republic'),
'TCA': _lazy(u'Turks and Caicos Islands'),
'TCD': _lazy(u'Chad'),
'TGO': _lazy(u'Togo'),
'THA': _lazy(u'Thailand'),
'TJK': _lazy(u'Tajikistan'),
'TKL': _lazy(u'Tokelau'),
'TKM': _lazy(u'Turkmenistan'),
'TLS': _lazy(u'Timor-Leste'),
'TON': _lazy(u'Tonga'),
'TTO': _lazy(u'Trinidad and Tobago'),
'TUN': _lazy(u'Tunisia'),
'TUR': _lazy(u'Turkey'),
'TUV': _lazy(u'Tuvalu'),
'TWN': _lazy(u'Taiwan'),
'TZA': _lazy(u'Tanzania'),
'UGA': _lazy(u'Uganda'),
'UKR': _lazy(u'Ukraine'),
'UMI': _lazy(u'United States Minor Outlying Islands'),
'URY': _lazy(u'Uruguay'),
'USA': _lazy(u'United States'),
'UZB': _lazy(u'Uzbekistan'),
'VAT': _lazy(u'Holy See'),
'VCT': _lazy(u'Saint Vincent and the Grenadines'),
'VEN': _lazy(u'Venezuela'),
'VGB': _lazy(u'Virgin Islands, British'),
'VIR': _lazy(u'Virgin Islands, U.S.'),
'VNM': _lazy(u'Viet Nam'),
'VUT': _lazy(u'Vanuatu'),
'WLF': _lazy(u'Wallis and Futuna'),
'WSM': _lazy(u'Samoa'),
'YEM': _lazy(u'Yemen'),
'ZAF': _lazy(u'South Africa'),
'ZMB': _lazy(u'Zambia'),
'ZWE': _lazy(u'Zimbabwe'),
}
for k, translation in lookup.items():
country = countries.COUNTRY_DETAILS[k].copy()
country['name'] = translation
if country.get('ratingsbody'):
country['ratingsbody'] = getattr(ratingsbodies, country['ratingsbody'])
locals()[k] = type(k, (REGION,), country)
# Please adhere to the new region checklist when adding a new region:
# https://mana.mozilla.org/wiki/display/MARKET/How+to+add+a+new+region
# Create a list of tuples like so (in alphabetical order):
#
# [('restofworld', <class 'mkt.constants.regions.RESTOFWORLD'>),
# ('brazil', <class 'mkt.constants.regions.BR'>),
# ('usa', <class 'mkt.constants.regions.USA'>)]
#
DEFINED = sorted(inspect.getmembers(sys.modules[__name__], inspect.isclass),
                 key=lambda x: getattr(x[1], 'slug', None))
REGIONS_CHOICES = (
[('restofworld', RESTOFWORLD)] +
sorted([(v.slug, v) for k, v in DEFINED if v.id and v.weight > -1],
key=lambda x: x[1].weight, reverse=True)
)
BY_SLUG = sorted([v for k, v in DEFINED if v.id and v.weight > -1],
key=lambda v: v.slug)
REGIONS_CHOICES_SLUG = ([('restofworld', RESTOFWORLD)] +
[(v.slug, v) for v in BY_SLUG])
REGIONS_CHOICES_ID = ([(RESTOFWORLD.id, RESTOFWORLD)] +
[(v.id, v) for v in BY_SLUG])
# Rest of World last here so we can display it after all the other regions.
REGIONS_CHOICES_NAME = ([(v.id, v.name) for v in BY_SLUG] +
[(RESTOFWORLD.id, RESTOFWORLD.name)])
REGIONS_DICT = dict(REGIONS_CHOICES)
REGIONS_CHOICES_ID_DICT = dict(REGIONS_CHOICES_ID)
# Provide a dict for looking up the region by slug that includes aliases:
# - "worldwide" is an alias for RESTOFWORLD (bug 940561).
# - "gb" is an alias for GBR (bug 973883).
# Note: GBR is inserted into locals() above
REGION_LOOKUP = dict(
REGIONS_DICT.items() +
[('worldwide', RESTOFWORLD), ('gb', locals()['GBR'])])
ALL_REGIONS = frozenset(REGIONS_DICT.values())
ALL_REGION_IDS = sorted(REGIONS_CHOICES_ID_DICT.keys())
SPECIAL_REGIONS = [x for x in BY_SLUG if x.special]
SPECIAL_REGION_IDS = sorted(x.id for x in SPECIAL_REGIONS)
# Regions not including restofworld.
REGION_IDS = sorted(REGIONS_CHOICES_ID_DICT.keys())[1:]
# Mature regions.
MATURE_REGION_IDS = sorted(x.id for x in ALL_REGIONS if not x.adolescent)
GENERIC_RATING_REGION_SLUG = 'generic'
def ALL_REGIONS_WITH_CONTENT_RATINGS():
"""Regions that have ratings bodies."""
return [x for x in ALL_REGIONS if x.ratingsbody]
def ALL_REGIONS_WITHOUT_CONTENT_RATINGS():
"""
Regions without ratings bodies and fallback to the GENERIC rating body.
"""
return set(ALL_REGIONS) - set(ALL_REGIONS_WITH_CONTENT_RATINGS())
def REGION_TO_RATINGS_BODY():
"""
Return a map of region slugs to ratings body labels for use in
serializers and to send to Fireplace.
e.g. {'us': 'esrb', 'mx': 'esrb', 'es': 'pegi', 'br': 'classind'}.
"""
# Create the mapping.
region_to_bodies = {}
for region in ALL_REGIONS_WITH_CONTENT_RATINGS():
ratings_body_label = GENERIC_RATING_REGION_SLUG
if region.ratingsbody:
ratings_body_label = slugify_iarc_name(region.ratingsbody)
region_to_bodies[region.slug] = ratings_body_label
return region_to_bodies
def REGIONS_LIST_SORTED_BY_NAME():
"""Get the region list and sort by name.
Requires a function due to localisation.
"""
# Avoid circular import.
from mkt.regions.utils import remove_accents
by_name = sorted([v for k, v in DEFINED if v.id and v.weight > -1],
key=lambda v: remove_accents(unicode(v.name)))
by_name.append(RESTOFWORLD)
return by_name
def REGIONS_CHOICES_SORTED_BY_NAME():
"""Get the region choices and sort by name.
Requires a function due to localisation.
"""
return [(v.id, v.name) for v in REGIONS_LIST_SORTED_BY_NAME()]
REGIONS_BY_MCC = {c['mcc']: c['slug']
for c in countries.COUNTRY_DETAILS.itervalues()
if 'mcc' in c}
|
washort/zamboni
|
mkt/constants/regions.py
|
Python
|
bsd-3-clause
| 14,156
|
[
"BWA"
] |
2fc396c337a74573de82c31346ffb53f962a875c6f3219e0fb481e57dcf411ed
|
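# Illustrative lookup against the structures defined above (assumes the mkt
# package is importable; Python 2, matching the module):
from mkt.constants import regions

region = regions.REGION_LOOKUP['worldwide']   # alias for RESTOFWORLD
print region.id, region.slug                  # 1 restofworld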
import orca
def add_dependent_columns(base_dfname, new_dfname):
tbl = orca.get_table(new_dfname)
for col in tbl.columns:
print "Adding", col
orca.add_column(base_dfname, col, tbl[col])
|
bhargavasana/activitysim
|
activitysim/defaults/models/util/misc.py
|
Python
|
agpl-3.0
| 211
|
[
"ORCA"
] |
c6c391bf70e556081797cb8b6efcf2014f160b491fb3b1bcc7da1771fc9461be
|
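# Hypothetical usage sketch for add_dependent_columns(): register two tables
# with orca and copy the second table's columns onto the first (table names
# and data are made up; assumes orca and pandas are installed).
import orca
import pandas as pd

orca.add_table('households', pd.DataFrame({'income': [50000, 72000]}))
orca.add_table('hh_extra', pd.DataFrame({'size': [2, 4]}))
add_dependent_columns('households', 'hh_extra')
print orca.get_table('households').to_frame()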
#!/usr/bin/python
#-*- coding: utf-8 -*-
# Copyright (C) 2015, Nikolai Chernikov <nikolai.chernikov.ru@gmail.com>
#
# "convert2ugrid" is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License v3+. "convert2ugrid" is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY. Consult the file
# LICENSE.GPL or www.gnu.org/licenses/gpl-3.0.txt for the full license terms.
import numpy as np
import os.path
import netCDF4
from . import ui, sprint
import process_cdl
import process_mossco_netcdf
from Mesh2 import gridhelp
class netcdfVariableReader(object):
""" Abstract class providing useful methods for quick access to NetCDF file.
This class """
def __init__(self):
pass
def check_dtype(self, _object, dtype, raise_error=True):
"""Compares type(_object) to "dtype" """
if dtype in (str, unicode): # here we treat unicode and simple string as same objects
dtype = (str, unicode)
if isinstance(_object, dtype):
return True
else:
if raise_error:
raise TypeError('<{0}> should be of type <{2}>. Is {1}'.format(_object, type(_object), str(dtype)))
else:
return False
def get_string_with_netcdf_varnames(self, ncname, prefix='', withdims=True):
""" Method returns a nice string of enumerated varnames within netcdf file
Inputs:
ncname - string, name of the netcdf datafile
prefix - string, prefix for first line of the "s"
Returns:
s - string
"""
self.check_dtype(ncname, str)
self.check_dtype(prefix, str)
self.check_dtype(withdims, bool)
nc = netCDF4.Dataset(ncname, mode='r')
s = prefix+' File <{0}> contains following variables:'.format(ncname)
for i, varname in enumerate( sorted(nc.variables.keys()) ):
if withdims: s += '\n\t{0:3d}) {1} {2} {3}'.format(i, varname, nc.variables[varname].dimensions, nc.variables[varname].shape)
else: s += '\n\t{0:3d}) {1}'.format(i, varname)
nc.close()
del nc
return s
def variableIsFound(self, ncname, varname):
""" Method tells user if variable has been found in netcdf file.
Inputs:
ncname - string, name of the netcdf datafile
varname - string, name of the variable within netcdf file
Returns:
found - True/False
"""
if varname is None: return False
self.check_dtype(ncname, str)
self.check_dtype(varname, str)
found = False
nc = netCDF4.Dataset(ncname, mode='r')
if varname in nc.variables.keys():
found = True
nc.close()
del nc
return found
def promtVariableNameInput(self, ncname, promtInputMessage='Type the name of the variable to pick:', printAvailableVarNames=True, dtype=str):
""" Method asks user to choose variable from file
Inputs:
ncname - string, name of the netcdf datafile
promtInputMessage - string, message to ask for variable input. Is used only if "promtInput=True"
Returns:
varname - string, name of the variable within netcdf file, since it may not be equall to input
"""
self.check_dtype(printAvailableVarNames, bool)
self.check_dtype(promtInputMessage, str)
self.check_dtype(ncname, str)
nc = netCDF4.Dataset(ncname, mode='r')
if printAvailableVarNames:
print self.get_string_with_netcdf_varnames(ncname)
print 'Select variable from file <{0}>. All available variables are listed above'.format(ncname)
else:
pass
#print 'Select variable from file <{0}>'.format(ncname)
varname = None
while varname not in nc.variables.keys():
varname = ui.promt(promtInputMessage, color='yellow', type=dtype, show_default=False)
nc.close()
del nc
return varname
def read_netcdfVarMetadata(self, ncname, varname, raise_error=True, promtInput=True, **kwargs):
""" Method reads given variable from netcdf file and returns metadata
Inputs:
ncname - string, name of the netcdf datafile
varname - string, name of the variable within netcdf file
raise_error - True/False, if True will raise error when file not found
promtInput - True/False,If True, will not return False if variable not found
not found in netcdf file. Instead ask user to type var-name manually,
using "promtInputMessage" in **kwargs
**kwargs - will be passed to promtVariableNameInput()
Returns:
metadata - dictionary or None
"""
self.check_dtype(ncname, str)
self.check_dtype(varname, str)
if not self.variableIsFound(ncname, varname):
if promtInput is True:
varname = self.promtVariableNameInput(**kwargs)
else:
if raise_error:
                    raise IndexError('No such variable <{0}> in file <{1}>'.format(varname, ncname))
else:
return None
nc = netCDF4.Dataset(ncname, mode='r')
metadata = dict()
var = nc.variables[varname]
metadata['fname'] = ncname # source
metadata['name'] = varname # source
metadata['dims'] = var.dimensions
metadata['dtype'] = var.dtype
metadata['shape'] = var.shape # will be empty tuple <()> if a single value
metadata['size'] = var.size # number of elements
# for some reason not working, printing "attribute 'mask' not found"
#metadata['mask'] = var.mask # True/False flag
metadata['fillvalue'] = var._FillValue if '_FillValue' in var.ncattrs() else None
metadata['nNonOneDims'] = len(filter(lambda a: a != 1, var.shape)) # length of the array.shape excluding single dimensions, for <()> will give 0
metadata['attrs'] = dict()
if var.ncattrs(): # if not empty list
for attr_n in sorted(var.ncattrs()):
metadata['attrs'][attr_n] = var.getncattr(attr_n)
nc.close()
del nc
return metadata
def read_netcdfVarData(self, ncname, varname, squeeze=False, raise_error=True, promtInput=True, **kwargs):
""" Method reads given variable from netcdf file and returns array
Inputs:
ncname - string, name of the netcdf datafile
varname - string, name of the variable within netcdf file
squeeze - True/False. If True, remove single-dimensional entries from the shape of an array.
raise_error - True/False, if True will raise error when file not found
promtInput - True/False,If True, will not return False if variable not found
not found in netcdf file. Instead ask user to type var-name manually,
using "promtInputMessage" in **kwargs
**kwargs - will be passed to variableIsFound()
Returns:
data - ndarray or None
"""
self.check_dtype(ncname, str)
self.check_dtype(varname, str)
self.check_dtype(squeeze, bool)
if not self.variableIsFound(ncname, varname):
if promtInput is True:
varname = self.promtVariableNameInput(**kwargs)
else:
if raise_error:
                    raise IndexError('No such variable <{0}> in file <{1}>'.format(varname, ncname))
else:
return None
nc = netCDF4.Dataset(ncname, mode='r')
data = nc.variables[varname][...] # with this syntax we catch also null-dimensional arrays (i.e. scalars)
if squeeze:
# note here errors may arise: if scalar is used, it will be converted to ndarray of shape <()>
data = np.squeeze(data)
nc.close()
del nc
return data
class cdlVariableExt(netcdfVariableReader):
"""Class-extension of a metadata cdlVariable()
This object, in contrast to its parent, can process netcdf data-file and read
meta-data from them. This is exactly what is being done: gathering meta-info from
the source netcdf-variable of the passed parent
Args:
parent (process_cdl.cdlVariable):
metadata from CDL
"""
def __init__(self, parent):
super(cdlVariableExt, self).__init__()
if not isinstance(parent, process_cdl.cdlVariable):
raise TypeError('Invalid `parent` type. Should be <cdlVariable>, received {0}'.format(type(parent)))
self._parent = parent
self._init_constants()
# do not know if it is good idea to check at the init stage
# whether netcdf file is valid, and var is whithin
self._source = self._search_source_metadata()
def _init_constants(self):
# overwriting method of parent
self._source_attrs = ['_source_filename', '_source_varname']
def _search_source_metadata(self):
parent_attrs = self._parent.get_attrs()
if all(attr in parent_attrs.keys() for attr in self._source_attrs):
# if the file exists
if os.path.isfile( parent_attrs[self._source_attrs[0]] ):
return self.read_netcdfVarMetadata(parent_attrs[self._source_attrs[0]], parent_attrs[self._source_attrs[1]])
else:
raise IOError('Special attribute <_source_filename>: No such file <{0}>'.format(parent_attrs[self._source_attrs[0]]))
else:
            raise ValueError('Add missing attributes that define data-file position within filesystem. Both these attributes should exist {0}'.format(self._source_attrs))
def get_parent(self):
return self._parent
def get_source_metadata(self):
#return self._search_source_metadata()
return self._source
class containerForGridGeneration(netcdfVariableReader):
"""Class is a container for information, required to generate grid."""
def __init__(self, topofile, log=False):
'''
Args:
topofile (str):
name of the netcdf file with topology info
'''
super(containerForGridGeneration, self).__init__()
self.check_dtype(topofile, str)
self._topo = topofile
self._init_constants()
self._init_coordinates_and_bathymetry_metadata(self._topo, log=log)
if log: print self.get_string_metadata_summary()
def _init_constants(self):
self._bathymetry = dict()
self._x_coords = dict()
self._y_coords = dict()
self._grid_info = dict()
self._constants = dict()
self._constants['x_cartesian_vnames'] = ['x', 'xx', 'x_x' , 'xX', 'x_X', 'xt', 'x_t', 'xT', 'x_T']
self._constants['x_geographi_vnames'] = ['lon', 'lonx', 'lon_x' , 'lonX', 'lon_X', 'lont', 'lon_t', 'lonT', 'lon_T']
self._constants['y_cartesian_vnames'] = ['y', 'yx', 'y_x' , 'yX', 'y_X', 'yt', 'y_t', 'yT', 'y_T']
self._constants['y_geographi_vnames'] = ['lat', 'latx', 'lat_x' , 'latX', 'lat_X', 'latt', 'lat_t', 'latT', 'lat_T']
self._constants['bathymetry_vnames'] = ['bathymetry']
self._constants['fv_attr_namelist'] = ['_FillValue', 'missing_value']
def get_constants(self):
return self._constants
def get_metadata(self):
return {'x': self._x_coords['meta'],
'y': self._y_coords['meta'],
'b': self._bathymetry['meta'],
'grid_info': self._grid_info}
def get_data(self, **kwargs):
return {'x': self.read_netcdfVarData(self._topo, self.get_metadata()['x']['name'], **kwargs),
'y': self.read_netcdfVarData(self._topo, self.get_metadata()['y']['name'], **kwargs),
'b': self.read_netcdfVarData(self._topo, self.get_metadata()['b']['name'], **kwargs)}
def get_mask(self, transpose=True, **kwargs):
return self._generate_mask(transpose=transpose, **kwargs)
def get_string_metadata_summary(self):
s = ''
s += '\n'+'-------------------------------------------------------------------------------'
s += '\n'+'------------------------- Grid Detection Summary ------------------------------'
s += '\n'+'-------------------------------------------------------------------------------'
s += '\n'+'X-coords :'
s += '\n'+' variable <{0}>'.format(self.get_metadata()['x']['name'])
s += '\n'+' dimensions <{0}>'.format(self.get_metadata()['x']['dims'])
s += '\n'+' shape <{0}>'.format(self.get_metadata()['x']['shape'])
s += '\n'+' location <{0}>'.format(self.get_metadata()['x']['points_location'])
s += '\n'+' type <{0}>'.format(self.get_metadata()['x']['coordinate_type'])
s += '\n'+' units <{0}>'.format(self.get_metadata()['x']['attrs']['units'] if 'units' in self.get_metadata()['x']['attrs'] else 'unknown')
s += '\n'+'Y-coords :'
s += '\n'+' variable <{0}>'.format(self.get_metadata()['y']['name'])
s += '\n'+' dimensions <{0}>'.format(self.get_metadata()['y']['dims'])
s += '\n'+' shape <{0}>'.format(self.get_metadata()['y']['shape'])
s += '\n'+' location <{0}>'.format(self.get_metadata()['y']['points_location'])
s += '\n'+' type <{0}>'.format(self.get_metadata()['y']['coordinate_type'])
s += '\n'+' units <{0}>'.format(self.get_metadata()['y']['attrs']['units'] if 'units' in self.get_metadata()['y']['attrs'] else 'unknown')
s += '\n'+'Bathymetry:'
s += '\n'+' variable <{0}>'.format(self.get_metadata()['b']['name'])
s += '\n'+' dimensions <{0}>'.format(self.get_metadata()['b']['dims'])
s += '\n'+' shape <{0}>'.format(self.get_metadata()['b']['shape'])
s += '\n'+' location <{0}>'.format(self.get_metadata()['b']['points_location'])
s += '\n'+'Grid Info :'
s += '\n'+' type <{0}>'.format(self.get_metadata()['grid_info']['type'])
s += '\n'+'-------------------------------------------------------------------------------'
s += '\n'+'-------------------------------------------------------------------------------'
s += '\n'+'-------------------------------------------------------------------------------'
return s
def _init_coordinates_and_bathymetry_metadata(self, ncname, bathymetry_vname=None, x_vname=None, y_vname=None, log=False):
self.check_dtype(ncname, str)
long_var_list_not_shot = True
if bathymetry_vname is not None: self.check_dtype(bathymetry_vname, str)
if x_vname is not None: self.check_dtype(x_vname, str)
if y_vname is not None: self.check_dtype(y_vname, str)
# searching for bathymetry
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
if bathymetry_vname and not self.variableIsFound(ncname, bathymetry_vname) :
ui.promt('Bathymetry: User-defined bathymetry-variable name <{0}> not found in file <{1}>. I will try to match default names. Press Enter to continue'.format(
bathymetry_vname, ncname), pause=True)
if not bathymetry_vname:
if log: print 'Bathymetry: Will try to match bathymetry-name from default list...'
for bathymetry_vname in self.get_constants()['bathymetry_vnames']:
found = self.variableIsFound(ncname, bathymetry_vname)
if found: # if var is found
sprint('Bathymetry: Variable found: <{0}>'.format(bathymetry_vname), log=log)
break
if not found: # we cycled through the whole loop and haven't found any var
sprint('Bathymetry: Bathymetry not found: No variable with name from default namelist found.', mode='warning')
sprint('Bathymetry: Default bathymetry namelist: {0}'.format(self.get_constants()['bathymetry_vnames']), mode='warning')
if long_var_list_not_shot:
ui.promt('Bathymetry: Choose manually. Press Enter to list all available variables in current file', pause=True)
ndim = -1
while ndim != 2:
bathymetry_vname = self.promtVariableNameInput(ncname, promtInputMessage='Bathymetry: Type the name of the bathymetry-variable to pick (should be 2D):', printAvailableVarNames=long_var_list_not_shot)
ndim = self.read_netcdfVarMetadata(ncname, bathymetry_vname, raise_error=False, promtInput=False)['nNonOneDims']
long_var_list_not_shot = False # the full variable list has been shown once already
# now saving bathymetry meta-data
self._bathymetry['meta'] = self.read_netcdfVarMetadata(ncname, bathymetry_vname, raise_error=True, promtInput=False)
sprint('Bathymetry: Picking bathymetry variable: <{0}> , dimensions {1}, shape {2}'.format(self._bathymetry['meta']['name'], self._bathymetry['meta']['dims'], self._bathymetry['meta']['shape']), log=log)
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
# now picking X and Y coords based on bathymetry
# ----------------------------------------------
# 1) try to use passed if any
# if not passed
# go to (2)
# elif not found
# go to (2)
# 2) try to use from bathymetry dim-name
# if not found
# go to (3)
# 3) try to use from default list
# if nothing has been found
# ask user to type manually
# elif one var has been found for x and for y
# take them
# elif more then one var has been found for x and for y
# ask user to choose one
# ----------------------------------------------------
# X coords...
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
if x_vname and not self.variableIsFound(ncname, x_vname):
ui.promt('X coords: User-defined X-coord-variable name <{0}> not found in file <{1}>. Press Enter to continue auto-search'.format(x_vname, ncname), pause=True, color='yellow')
if not x_vname:
dim_bath = self._bathymetry['meta']['dims']
sprint('X coords: Trying to find variable based on bathymetry dimensions <{0}>. Searching for variable <{1}>'.format(dim_bath, dim_bath[-1] ), log=log)
if not self.variableIsFound(ncname, dim_bath[-1]):
sprint('X coords: variable name <{0}> not found in file <{1}>.'.format(dim_bath[-1], ncname), log=log, mode='warning')
x_default_list = self.get_constants()['x_cartesian_vnames']+self.get_constants()['x_geographi_vnames']
sprint('X coords: Searching for variables with name from default list {0}'.format(x_default_list), log=log)
x_found = list()
for x_n in x_default_list:
if self.variableIsFound(ncname, x_n):
x_found.append(x_n)
# X case (1)
if len(x_found) == 0: # nothing found, ask user to type name
sprint('X coords: Nothing found. Choose manually.', mode='warning')
if long_var_list_not_shot:
ui.promt('X coords: Press Enter to list all available variables in current file', pause=True)
ndim = -1
while ndim not in [1, 2]:
x_vname = self.promtVariableNameInput(ncname, promtInputMessage='X coords: Type the name of the X-coords-variable to pick (should be 1D or 2D):', printAvailableVarNames=long_var_list_not_shot)
ndim = self.read_netcdfVarMetadata(ncname, x_vname, raise_error=False, promtInput=False)['nNonOneDims']
long_var_list_not_shot = False # the full variable list has been shown once already
# X case (2)
elif len(x_found) == 1:
x_vname = x_found[0]
# X case (3)
else: # len(x_found) > 1
sprint('X coords: More than 1 X-coords variable exist; variables found:', mode='warning')
for x_v_n in x_found:
print '\t', x_v_n
x_vname = self.promtVariableNameInput(ncname, promtInputMessage='X coords: Type the name of the X-coords-variable to pick:', printAvailableVarNames=False)
else:
x_vname = dim_bath[-1]
self._x_coords['meta'] = self.read_netcdfVarMetadata(ncname, x_vname, raise_error=True, promtInput=False)
sprint('X coords: Picking X-coords variable: <{0}> , dimensions {1}, shape {2}'.format(self._x_coords['meta']['name'], self._x_coords['meta']['dims'], self._x_coords['meta']['shape']), log=log)
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
# Y coords...
# ----------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------
if y_vname and not self.variableIsFound(ncname, y_vname):
ui.promt('Y coords: User-defined Y-coord-variable name <{0}> not found in file <{1}>. Press Enter to continue auto-search'.format(y_vname, ncname), pause=True)
if not y_vname:
dim_bath = self._bathymetry['meta']['dims']
sprint('Y coords: Trying to find variable based on bathymetry dimensions <{0}>. Searching for variable <{1}>'.format(dim_bath, dim_bath[-2] ), log=log)
if not self.variableIsFound(ncname, dim_bath[-2]):
sprint('Y coords: variable name <{0}> not found in file <{1}>.'.format(dim_bath[-2], ncname), log=log, mode='warning')
y_default_list = self.get_constants()['y_cartesian_vnames']+self.get_constants()['y_geographi_vnames']
sprint('Y coords: Searching for variables with name from default list {0}'.format(y_default_list), log=log)
y_found = list()
for y_n in y_default_list:
if self.variableIsFound(ncname, y_n):
y_found.append(y_n)
# Y case (1)
if len(y_found) == 0: # nothing found, ask user to type name
sprint('Y coords: Nothing found. Choose manually.', mode='warning')
if long_var_list_not_shot:
ui.promt('Y coords: Press Enter to list all available variables in current file', pause=True)
ndim = -1
while ndim not in [1, 2]:
y_vname = self.promtVariableNameInput(ncname, promtInputMessage='Y coords: Type the name of the Y-coords-variable to pick (should be 1D or 2D):', printAvailableVarNames=long_var_list_not_shot)
ndim = self.read_netcdfVarMetadata(ncname, y_vname, raise_error=False, promtInput=False)['nNonOneDims']
long_var_list_not_shot = False # the full variable list has been shown once already
# Y case (2)
elif len(y_found) == 1:
y_vname = y_found[0]
# Y case (3)
else: # len(y_found) > 1
sprint('Y coords: More than 1 Y-coords variable exist; variables found:', mode='warning')
for y_v_n in y_found:
print '\t', y_v_n
y_vname = self.promtVariableNameInput(ncname, promtInputMessage='Y coords: Type the name of the Y-coords-variable to pick:', printAvailableVarNames=False)
else:
y_vname = dim_bath[-2]
self._y_coords['meta'] = self.read_netcdfVarMetadata(ncname, y_vname, raise_error=True, promtInput=False)
if log: print 'Y coords: Picking Y-coords variable: <{0}> , dimensions {1}, shape {2}'.format(
self._y_coords['meta']['name'], self._y_coords['meta']['dims'], self._y_coords['meta']['shape'])
# ----------------------------------------------------------------------------------
# -------------------------------------------------------
# now we know var-names for X and Y
# -------------------------------------------------------
# now determine if it is rectangular grid or curvilinear....
#
# if x-coord and y-coord are 1d arrays
# then the grid is rectangular but the cells may be of
# any rectangular shape
#
# if x-y- coords are 2d arrays,
# then the grid is curvilinear
# NOTE: here someone can paste rectangular grid with two 2d arrays... but it will still work
if len(self._x_coords['meta']['shape']) == 1 and len(self._y_coords['meta']['shape']) == 1:
self._grid_info['type'] = 'rectangular'
elif len(self._x_coords['meta']['shape']) == 2 and len(self._y_coords['meta']['shape']) == 2:
self._grid_info['type'] = 'curvilinear'
else:
sprint('GridType: Using variable for X-coords <{0}>, of shape <{1}>'.format(self._x_coords['meta']['name'], self._x_coords['meta']['shape']), mode='fail')
sprint('GridType: Using variable for Y-coords <{0}>, of shape <{1}>'.format(self._y_coords['meta']['name'], self._y_coords['meta']['shape']), mode='fail')
raise ValueError('Grid type not understood. X and Y should be either two 1D arrays or two 2D arrays'+'\n'+gridhelp())
# now determine if coords are at T (cell center) or X (cell nodes) points...
sprint('Data location: X-coords variable: <{0}> , dimensions {1}, shape {2}'.format(self._x_coords['meta']['name'], self._x_coords['meta']['dims'], self._x_coords['meta']['shape']))
sprint('Data location: Y-coords variable: <{0}> , dimensions {1}, shape {2}'.format(self._y_coords['meta']['name'], self._y_coords['meta']['dims'], self._y_coords['meta']['shape']))
sprint('Data location: Bathymetry variable: <{0}> , dimensions {1}, shape {2}'.format(self._bathymetry['meta']['name'], self._bathymetry['meta']['dims'], self._bathymetry['meta']['shape']))
xy_location = None
while xy_location not in ['x', 'X', 't', 'T']:
xy_location = ui.promt('Data location: select whether original XY-COORDS data is located at nodes(X_points) or at cell centers(T_points) [X/T]:', color='yellow', show_default=False, type=str)
if xy_location in ['x', 'X']:
self._x_coords['meta']['points_location'] = 'X_points'
self._y_coords['meta']['points_location'] = 'X_points'
else:
self._x_coords['meta']['points_location'] = 'T_points'
self._y_coords['meta']['points_location'] = 'T_points'
bt_location = None
while bt_location not in ['x', 'X', 't', 'T']:
bt_location = ui.promt('Data location: select whether original BATHYMETRY data is located at nodes(X_points) or at cell centers(T_points) [X/T]:', color='yellow', show_default=False, type=str)
if bt_location in ['x', 'X']:
self._bathymetry['meta']['points_location'] = 'X_points'
else:
self._bathymetry['meta']['points_location'] = 'T_points'
# determine mode.... (Geographic or Cartesian)
if self._x_coords['meta']['name'] in self.get_constants()['x_cartesian_vnames'] and self._y_coords['meta']['name'] in self.get_constants()['y_cartesian_vnames']:
self._x_coords['meta']['coordinate_type'] = 'cartesian'
self._y_coords['meta']['coordinate_type'] = 'cartesian'
elif self._x_coords['meta']['name'] in self.get_constants()['x_geographi_vnames'] and self._y_coords['meta']['name'] in self.get_constants()['y_geographi_vnames']:
self._x_coords['meta']['coordinate_type'] = 'geographic'
self._y_coords['meta']['coordinate_type'] = 'geographic'
else:
# now show user found vars
sprint('Coords type: X-coords variable: <{0}> , dimensions {1}, shape {2}'.format(self._x_coords['meta']['name'], self._x_coords['meta']['dims'], self._x_coords['meta']['shape']))
sprint('Coords type: Y-coords variable: <{0}> , dimensions {1}, shape {2}'.format(self._y_coords['meta']['name'], self._y_coords['meta']['dims'], self._y_coords['meta']['shape']))
coord_mode = None
while coord_mode not in ['c', 'g']:
coord_mode = ui.promt('Coords type: coord-type not understood. Choose cartesian or geographic. Type [c/g]:', color='yellow', show_default=False, type=str)
if coord_mode == 'c':
self._x_coords['meta']['coordinate_type'] = 'cartesian'
self._y_coords['meta']['coordinate_type'] = 'cartesian'
if coord_mode == 'g':
self._x_coords['meta']['coordinate_type'] = 'geographic'
self._y_coords['meta']['coordinate_type'] = 'geographic'
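# Illustrative sketch (not part of the original module): the three-step
# name search used above for the X/Y coordinate variables -- (1) the
# user-supplied name, (2) the bathymetry dimension name, (3) the default
# namelist -- reduced to a self-contained static helper over a plain list
# of variable names. All names here are hypothetical stand-ins for the
# netCDF lookups performed by variableIsFound().
@staticmethod
def _sketch_pick_vname(available, user_vname, dim_vname, default_list):
    for candidate in (user_vname, dim_vname):  # steps (1) and (2)
        if candidate and candidate in available:
            return candidate
    found = [v for v in default_list if v in available]  # step (3)
    return found[0] if len(found) == 1 else None  # None -> prompt the user
# e.g. _sketch_pick_vname(['lon', 'lat'], None, 'xi', ['x', 'lon']) -> 'lon'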
def _generate_mask(self, maskvalue=None, transpose=False, log=False):
''' Function reads a 2D array (in MOSSCO - 'bathymetry') and generates a boolean mask
array (True means that value is masked), based on 'maskvalue'
We have here 2 possibilities depending on data - location:
1) bathymetry at X_points
2) bathymetry at T_points
x-----x-----x-----x-----x
| | | | |
| T | T | T | T |
| | | | |
x-----x-----x-----x-----x
| | | | |
| T | T | T | T |
| | | | |
x-----x-----x-----x-----x
| | | | |
| T | T | T | T |
| | | | |
x-----x-----x-----x-----x
input:
maskvalue - float indicating the values to be masked (mask=True). By default (maskvalue=None)
the value is taken automatically from the _FillValue/missing_value attribute
transpose - a boolean flag to return transposed array or not
log - a boolean flag to print additional info
'''
_n = 'generate_mask(): '
meta = self.get_metadata()
if log: print _n, 'Working with variable <{0}> of shape {2} from file <{1}>'.format(meta['b']['name'], meta['b']['fname'], meta['b']['shape'])
array = self.get_data(squeeze=True)['b']
if len(array.shape) == 2:
pass
else:
raise ValueError(_n+" Array should be two dimensional. Received {0}-dimensional".format(len(array.shape)))
location = meta['b']['points_location']
mask_in = None
# if it is already masked array, simply take the mask
if isinstance(array, np.ma.MaskedArray):
mask_in = np.ma.getmaskarray(array)
if not ui.promtYesNo(_n+'Array is already of type <numpy.ma.MaskedArray>\n' + _n +
'Default mask of shape {0} found. Use it? If "no" I will try to build a mask based on _FillValue.'.format(mask_in.shape)):
mask_in = None
# ... or create own mask based on fv
if mask_in is None:
if maskvalue is None: # nothing passed by user
for attr in self.get_constants()['fv_attr_namelist']:
if attr in meta['b']['attrs'].keys():
maskvalue = float(meta['b']['attrs'][attr])
if log: print _n, 'Found attribute <{0}={1}>. I will use this value to generate mask'.format(attr, maskvalue)
break
# if nothing found in <for> , here maskvalue=None
if not maskvalue:
ui.promt(_n+'Current variable does not have any of the following attributes {0}. I will continue without mask. Press ENTER to continue'.format(
self.get_constants()['fv_attr_namelist']), color='yellow', pause=True)
return None
masked_arr = np.ma.masked_values(array, float(maskvalue))
mask_in = np.ma.getmaskarray(masked_arr)
del masked_arr
# so... at this point we should have <mask_in> - the input mask
if location == 'X_points':
# create new face map ( 2D array of T_points), it will have one less index in each dimension
mask_out = np.zeros(tuple([array.shape[0]-1, array.shape[1]-1]), dtype=bool) # by default all cells are valid (mask=False)
print _n, "mask out shaoe: ", mask_out.shape
print _n, "mask in shaoe: ", mask_in.shape
# loop over all face-indexes. If any of the surrounding nodes is invalid (mask=True)
# then the whole face is considered to be invalid as well
for j in xrange(mask_out.shape[0]):
for i in xrange(mask_out.shape[1]):
tl = mask_in[j , i ] #topleft node
tr = mask_in[j , i+1] #topright node
br = mask_in[j+1, i+1] #bottomright node
bl = mask_in[j+1, i ] #bottomleft node
nodes = [tl, tr, br, bl]
if any(node for node in nodes): # cannot write here <if any(node is True...)> since it may be of type <numpy.bool_> and not <bool>
mask_out[j, i] = True
elif location == 'T_points' : # location == 'T_points'
# from numpy docs: "We must keep in mind that a True entry in the mask indicates an invalid data"
mask_out = mask_in
else:
raise ValueError('Invalid data location. Should be <T_points> or <X_points>, received {0}'.format(location))
# at this point mask is created....
if mask_out.any(): # True if the array has at least one masked element
if transpose: mask_out = mask_out.T
if log: print _n, 'Created boolean mask of shape {0} (created from array of shape{1})'.format(mask_out.shape, array.shape)
else:
if log: print _n, 'Array has no invalid entries. Mask is not needed. Returning <None>'
mask_out = None # overwriting nomask value
return mask_out
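# Illustrative sketch (not part of the original module): the node-to-face
# mask collapse in _generate_mask() above -- a face is invalid if any of its
# four corner nodes is invalid -- can also be written without the explicit
# double loop. <mask_in> is assumed to be a 2D boolean array at X_points.
def _sketch_collapse_node_mask(mask_in):
    import numpy as np
    mask_in = np.asarray(mask_in, dtype=bool)
    # OR the four corner nodes of every face in one vectorized expression;
    # the result has one element less in each dimension (T_points)
    return (mask_in[:-1, :-1] | mask_in[:-1, 1:] |
            mask_in[1:, 1:] | mask_in[1:, :-1])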
def create_magnitude_variable_from_x_y_component(VARS, varname, varval, mask=None, log=False):
'''
in:
----
VARS - dictionary generated by function process_cdl.read_file_with_only_variables()
varname - string, item-key of a dictionary VARS, should correspond to varval
varval - item-value of a dictionary VARS, should correspond to varname
varval[0] >>> datatype
varval[1] >>> [dim1,dim2,...]
varval[2] >>> dict(attributes)
Note: all values are stored as strings
out:
----
magnitude - None, or N-dimensional numpy array (maybe masked, depending on the inputs).
It will usually be a 1D array (the <...> in selections such as x[t, ..., f] is there
to increase flexibility of accepted variables; mostly it will be x[t, f])
'''
magnitude = None
# -----------------------------------------------------------------------------------------------
# Create auto variables
# -----------------------------------------------------------------------------------------------
if '_auto_creation' in varval[2].keys():
if log: print 'Autocreation-variable'
_fnx = VARS[ varval[2]['_auto_creation'].split(',')[0].strip() ] [2]['_mossco_filename']
_fny = VARS[ varval[2]['_auto_creation'].split(',')[1].strip() ] [2]['_mossco_filename']
_vnx = VARS[ varval[2]['_auto_creation'].split(',')[0].strip() ] [2]['_mossco_varname']
_vny = VARS[ varval[2]['_auto_creation'].split(',')[1].strip() ] [2]['_mossco_varname']
# -----------------------------------------------------------------------------------------------
# now check which function (read_mossco_nc_3d, read_mossco_nc_4d) to use to get data....
# -----------------------------------------------------------------------------------------------
if varname.endswith('_2d'):
x, _ = process_mossco_netcdf.read_mossco_nc_3d(_fnx, _vnx, mask=mask)
y, _ = process_mossco_netcdf.read_mossco_nc_3d(_fny, _vny, mask=mask)
magnitude = np.zeros(x.shape)
for t in xrange(x.shape[0]):
for f in xrange(x.shape[-1]):
_x = x[t, ..., f]
_y = y[t, ..., f]
_magnitude = (_x**2 + _y**2)**(1./2.)
magnitude[t, ..., f] = _magnitude
elif varname.endswith('_3d'):
x, _ = process_mossco_netcdf.read_mossco_nc_4d(_fnx, _vnx, mask=mask)
y, _ = process_mossco_netcdf.read_mossco_nc_4d(_fny, _vny, mask=mask)
magnitude = np.zeros(x.shape)
for t in xrange(x.shape[0]):
for z in xrange(x.shape[1]):
for f in xrange(x.shape[-1]):
_x = x[t, z, ..., f]
_y = y[t, z, ..., f]
_magnitude = (_x**2 + _y**2)**(1./2.)
magnitude[t, z, ..., f] = _magnitude
return magnitude
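# Illustrative sketch (not part of the original module): per element, the
# magnitude computed above is the Euclidean norm of the two components, so
# for plain (already loaded) numpy arrays it can be written directly as:
def _sketch_magnitude(x, y):
    import numpy as np
    # hypot(x, y) == sqrt(x**2 + y**2) elementwise, and is numerically
    # safer against overflow than squaring by hand
    return np.hypot(x, y)
# e.g. _sketch_magnitude(np.array([3.]), np.array([4.])) -> array([ 5.])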
def create_layer_elevation_from_sigma_coords(eta, sigma, depth, flatten=False, mask=None, log=False, indent=''):
'''
Generate elevation information of layers (cell center and borders) based on passed
- water-level
- bathymetry
- sigma-coordinates of layers
Calculations are performed in accordance with CF-conventions (CF 1.6)
for variable "Ocean Sigma Coordinate" as...
z(n,k,j,i) = eta(n,j,i) + sigma(k)*(depth(j,i)+eta(n,j,i))
where:
z, eta, sigma, depth - numpy arrays
n - integer, timesteps
k - integer, layers
j,i - integer, y,x indices
Args:
-----
eta (3D-array of floats):
3d array of (time, y, x) dimensions of water-level data with respect to MSL.
Down negative
sigma (1D-array of floats):
1d array of (z) dimension of sigma coordinates of layer centers.
Down negative (bottom = -1, surface = 0)
depth (2D-array of floats):
2d array of (y, x) dimensions of bathymetry data with respect to MSL.
Down positive (MSL = 0, bottom > 0)
flatten (bool):
if True, x,y dimensions of the array will be compressed into one single dimension of length x*y
mask (2D-array of bool)
boolean 2d mask of (y, x) dimensions. Is used to ignore elements during flattening.
See <process_mossco_netcdf.make_mask_array_from_mossco_bathymetry()>
log (bool):
flag to print additional info
Return:
-------
elev (4D-array of floats):
4d array of (time, z, y, x) dimensions with elevation of the layer center with respect
to the MSL. Down negative
elev_borders (5D array of floats):
5d array of (2, time, z, y, x) dimensions with elevation of layer borders with respect to MSL.
Down negative. Note: elev_borders[1, t, k, j, i] == elev_borders[0, t, k+1, j, i]
'''
_i = indent+'\t'
_n = 'create_layer_elevation_from_sigma_coords():'
sprint(_n, log=log, indent=indent, mode='bold')
sprint('Shapes of the inputs...', log=log, indent=_i)
sprint('eta: <{0}>, sigma: <{1}>, depth: <{2}>'.format(eta.shape, sigma.shape, depth.shape), log=log, indent=_i)
elev = np.zeros((eta.shape[0], len(sigma), eta.shape[1], eta.shape[2]))
elev_borders = np.zeros((2, eta.shape[0], len(sigma), eta.shape[1], eta.shape[2]))
# create sigma coordinates of the borders
sigma_borders = np.zeros(len(sigma)+1)
sigma_borders[0] = -1. # coordinate of the very bottom
for z in xrange(len(sigma)):
sigma_borders[z+1] = sigma_borders[z] - (sigma_borders[z] - sigma[z])*2
if abs(sigma_borders[-1]) > 0.005:
sprint('sigma layer centers', sigma, indent=_i, mode='fail')
sprint('sigma layer borders', sigma_borders, indent=_i, mode='fail')
raise ValueError('Sigma values for layer-borders were not calculated correctly')
else:
sigma_borders[-1] = 0. # coordinate of the very top
sprint('sigma layer centers', sigma, log=log, indent=_i)
sprint('sigma layer borders', sigma_borders, log=log, indent=_i)
for t in xrange(elev.shape[0]):
for z in xrange(elev.shape[1]):
elev[t, z, ...] = eta[t, ...] + sigma[z]*(depth + eta[t, ...])
for border in xrange(2):
elev_borders[border, t, z, ...] = eta[t, ...] + sigma_borders[z+border]*(depth + eta[t, ...])
#if log: print 't=', t, 'z=', z, 'elev:', elev[t, z, 12, 12]
#if log: print 't=', t, 'z=', z, 'elev_bnb [0]:', elev_borders[0, t, z, 12, 12]
#if log: print 't=', t, 'z=', z, 'elev_bnb [1]:', elev_borders[1, t, z, 12, 12]
sprint('Elevation array created of shape <{0}>'.format(elev.shape), log=log, indent=_i)
sprint('Elevation border array created of shape <{0}>'.format(elev_borders.shape), log=log, indent=_i)
return elev, elev_borders
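# Illustrative worked example (not part of the original module) of the CF
# "ocean sigma coordinate" formula used above, z = eta + sigma*(depth + eta):
def _sketch_sigma_to_z(eta, sigma, depth):
    # eta: water level above MSL (m); depth: bathymetry, down positive (m);
    # sigma: 0. at the surface, -1. at the bottom
    return eta + sigma * (depth + eta)
# e.g. _sketch_sigma_to_z(0.5, -0.5, 10.0) -> -4.75, i.e. mid-depth of the
# 10.5 m water column (surface at +0.5 m, bottom at -10.0 m)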
def create_sigma_coords_of_layer_center(sigma_border):
'''
Creates an array of sigma-coordinates for layer centers, when a
corresponding array is given for the layer borders.
'''
sigma_center = np.zeros(len(sigma_border)-1)
for z in xrange(len(sigma_center)):
sigma_center[z] = .5*(sigma_border[z] + sigma_border[z+1])
return sigma_center
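# Illustrative sketch (not part of the original module): the midpoint loop
# above is equivalent to a single vectorized expression over the borders:
def _sketch_layer_centers(sigma_border):
    import numpy as np
    sigma_border = np.asarray(sigma_border, dtype=float)
    return 0.5 * (sigma_border[:-1] + sigma_border[1:])
# e.g. _sketch_layer_centers([-1.0, -0.5, 0.0]) -> array([-0.75, -0.25])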
def flatten_xy_data(data, mask=None, transpose=False):
data = np.squeeze(data)
if transpose:
data = data.T
if mask is None:
dims = list(data.shape[:-2])
dims.append(data.shape[-1]*data.shape[-2])
a = np.zeros(dims)
if len(dims) == 3:
for t in xrange(data.shape[0]):
for z in xrange(data.shape[1]):
a[t, z, :] = data[t, z, ...].flatten(order='F')
elif len(dims) == 2:
for t in xrange(data.shape[0]):
a[t, :] = data[t, ...].flatten(order='F')
elif len(dims) == 1:
a[:] = data[...].flatten(order='F')
elif len(dims) == 4 and dims[0] == 2: # if we have boundary var (i.e. Mesh2_face_bnd(two, t, z, face))
del a
dims.append(2)
a = np.zeros(dims[1::]) # make the dimension two appear at the end... (two, t, z, face) => (t, z, face, two)
for t in xrange(data.shape[1]):
for z in xrange(data.shape[2]):
for bnd in xrange(data.shape[0]):
a[t, z, :, bnd] = data[bnd, t, z, ...].flatten(order='F')
else:
raise ValueError('Number of array dimensions <{0}> is not supported.'.format(len(dims)))
else:
n_valid_2d = np.sum(np.invert(mask)) #number of valid elements in 2d part. invert - because True is an invalid element
dims = list(data.shape[:-2])
dims.append(n_valid_2d)
a = np.zeros(dims)
if len(dims) == 3:
for t in xrange(data.shape[0]):
for z in xrange(data.shape[1]):
var_masked = np.ma.array(data[t, z, ...], mask=mask)
var_masked = var_masked.flatten(order='F').compressed()
a[t, z, :] = var_masked
elif len(dims) == 2:
for t in xrange(data.shape[0]):
var_masked = np.ma.array(data[t, ...], mask=mask)
var_masked = var_masked.flatten(order='F').compressed()
a[t, :] = var_masked
elif len(dims) == 1:
var_masked = np.ma.array(data[...], mask=mask)
var_masked = var_masked.flatten(order='F').compressed()
a[:] = var_masked
elif len(dims) == 4 and dims[0] == 2: # if we have boundary var (i.e. Mesh2_face_bnd(two, t, z, face))
del a
dims.append(2)
a = np.zeros(dims[1::]) # make the dimension two appear at the end... (two, t, z, face) => (t, z, face, two)
for t in xrange(data.shape[1]):
for z in xrange(data.shape[2]):
for bnd in xrange(data.shape[0]):
var_masked = np.ma.array(data[bnd, t, z, ...], mask=mask)
var_masked = var_masked.flatten(order='F').compressed()
a[t, z, :, bnd] = var_masked
else:
raise ValueError('Number of array dimensions <{0}> is not supported.'.format(len(dims)))
return a
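# Illustrative usage sketch (not part of the original module), kept as a
# comment so the module stays import-clean; the data and mask are made up.
# Flattening a (time, y, x) array with a 2D boolean mask keeps only the
# valid cells, so the result has shape (time, n_valid):
# import numpy as np
# data = np.arange(2 * 3 * 4).reshape(2, 3, 4)            # (t=2, y=3, x=4)
# mask = np.zeros((3, 4), dtype=bool); mask[0, 0] = True  # one invalid cell
# flat = flatten_xy_data(data, mask=mask)                 # shape (2, 11)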
|
cdd1969/convert2ugrid
|
lib/process_mixed_data.py
|
Python
|
gpl-3.0
| 47,620
|
[
"NetCDF"
] |
772d19d4b45205736f27435dcd5ef7fbb42a9ad2314a756614bb6ca25331e846
|
import collections
import contextlib
import os
import shutil
import subprocess
import pytest
import yaml
from bcbio.pipeline.config_utils import load_system_config
OUTPUT_DIR = "test_automated_output"
def default_workdir():
return os.path.join(os.path.dirname(__file__), OUTPUT_DIR)
@pytest.fixture
def data_dir():
return os.path.join(os.path.dirname(__file__), "data", "automated")
@contextlib.contextmanager
def make_workdir():
remove_old_dir = True
# Specify workdir though env var, in case tests have to run not in the
# default location (e.g. to run tests on a mounted FS)
dirname = os.environ.get('BCBIO_WORKDIR', default_workdir())
if remove_old_dir:
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
orig_dir = os.getcwd()
try:
os.chdir(dirname)
yield dirname
finally:
os.chdir(orig_dir)
@pytest.yield_fixture
def workdir():
with make_workdir() as wd:
yield wd
def get_post_process_yaml(data_dir, workdir):
"""Prepare a bcbio_system YAML file pointing to test data.
"""
try:
from bcbiovm.docker.defaults import get_datadir
datadir = data_dir or get_datadir()
# guard the join: datadir may be None if bcbiovm provides no default
system = (os.path.join(datadir, "galaxy", "bcbio_system.yaml")
if datadir else None)
except ImportError:
system = None
if system is None or not os.path.exists(system):
try:
_, system = load_system_config(
config_file="bcbio_system.yaml", work_dir=workdir)
except ValueError:
system = None
if system is None or not os.path.exists(system):
system = os.path.join(data_dir, "post_process-sample.yaml")
# create local config pointing to reduced genomes
test_system = os.path.join(workdir, "bcbio_system.yaml")
with open(system) as in_handle:
config = yaml.load(in_handle)
config["galaxy_config"] = os.path.join(data_dir, "universe_wsgi.ini")
with open(test_system, "w") as out_handle:
yaml.dump(config, out_handle)
return test_system
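# Illustrative usage sketch (not part of the original suite), kept as a
# comment so it is not collected as a real test; the test body is made up.
# Inside a test, the fixtures above combine to yield a ready-to-use
# bcbio_system YAML:
# def test_uses_system_config(data_dir, workdir):
#     system_yaml = get_post_process_yaml(data_dir, workdir)
#     assert os.path.exists(system_yaml)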
@pytest.fixture
def install_test_files(data_dir):
"""Download required sequence and reference files.
"""
DlInfo = collections.namedtuple("DlInfo", "fname dirname version")
download_data = [
DlInfo("110106_FC70BUKAAXX.tar.gz", None, None),
DlInfo("genomes_automated_test.tar.gz", "genomes", 31),
DlInfo("110907_ERP000591.tar.gz", None, None),
DlInfo("100326_FC6107FAAXX.tar.gz", None, 11),
DlInfo("tcga_benchmark.tar.gz", None, 3),
DlInfo("singlecell-rnaseq-test-data.tar.gz", "Harvard-inDrop", 1)
]
for dl in download_data:
url = "http://chapmanb.s3.amazonaws.com/{fname}".format(fname=dl.fname)
dirname = os.path.join(
data_dir, os.pardir,
dl.fname.replace(".tar.gz", "") if dl.dirname is None
else dl.dirname
)
if os.path.exists(dirname) and dl.version is not None:
version_file = os.path.join(dirname, "VERSION")
is_old = True
if os.path.exists(version_file):
with open(version_file) as in_handle:
version = int(in_handle.read())
is_old = version < dl.version
if is_old:
shutil.rmtree(dirname)
if not os.path.exists(dirname):
_download_to_dir(url, dirname)
def _download_to_dir(url, dirname):
print(dirname)
cl = ["wget", url]
subprocess.check_call(cl)
cl = ["tar", "-xzvpf", os.path.basename(url)]
subprocess.check_call(cl)
shutil.move(os.path.basename(dirname), dirname)
os.remove(os.path.basename(url))
|
brainstorm/bcbio-nextgen
|
tests/conftest.py
|
Python
|
mit
| 3,789
|
[
"Galaxy"
] |
25d4d3924f317e79e362ae2b4110c779fef1ea196f0a7b5e5519d472fb6d4de9
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
"""
from PIL import (Image,
ImageEnhance)
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class GaussianBlur(BaseFilter):
"""
Apply a Gaussian blur to the image.
"""
def __init__(self, level=1):
"""
@param level -- Number of times to blur.
"""
BaseFilter.__init__(self)
self.level = level
def process(self, image):
"""
@param image -- The image to process.
Returns a single image, or a list containing one or more images.
"""
BaseFilter.process(self, image)
# keep the alpha band (ImageSensor images are assumed to be 'LA':
# grayscale + alpha)
mask = image.split()[1]
for i in xrange(self.level):
# reducing sharpness to 0.0 smooths the grayscale band, which
# approximates one pass of a Gaussian blur
sharpness_enhancer = ImageEnhance.Sharpness(image.split()[0])
image = sharpness_enhancer.enhance(0.0)
image.putalpha(mask)
return image
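# Illustrative usage sketch (not part of the original module), kept as a
# comment; assumes the sensor provides 'LA' (grayscale + alpha) images,
# and the image construction here is made up:
# from PIL import Image
# img = Image.new('LA', (32, 32), (128, 255))
# blurred = GaussianBlur(level=2).process(img)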
|
0x0all/nupic
|
py/regions/ImageSensorFilters/GaussianBlur.py
|
Python
|
gpl-3.0
| 1,766
|
[
"Gaussian"
] |
c61221614edb31808189d32a872c3efd22309efd414b6d842038842e5de9216b
|
from __future__ import absolute_import, division, print_function
from textwrap import dedent
import _pytest._code
import py
import pytest
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR
@pytest.fixture(scope="module", params=["global", "inpackage"])
def basedir(request, tmpdir_factory):
from _pytest.tmpdir import tmpdir
tmpdir = tmpdir(request, tmpdir_factory)
tmpdir.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
tmpdir.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
if request.param == "inpackage":
tmpdir.ensure("adir/__init__.py")
tmpdir.ensure("adir/b/__init__.py")
return tmpdir
def ConftestWithSetinitial(path):
conftest = PytestPluginManager()
conftest_setinitial(conftest, [path])
return conftest
def conftest_setinitial(conftest, args, confcutdir=None):
class Namespace(object):
def __init__(self):
self.file_or_dir = args
self.confcutdir = str(confcutdir)
self.noconftest = False
conftest._set_initial_conftests(Namespace())
class TestConftestValueAccessGlobal(object):
def test_basic_init(self, basedir):
conftest = PytestPluginManager()
p = basedir.join("adir")
assert conftest._rget_with_confmod("a", p)[1] == 1
def test_immediate_initialization_and_incremental_are_the_same(self, basedir):
conftest = PytestPluginManager()
len(conftest._path2confmods)
conftest._getconftestmodules(basedir)
snap1 = len(conftest._path2confmods)
#assert len(conftest._path2confmods) == snap1 + 1
conftest._getconftestmodules(basedir.join('adir'))
assert len(conftest._path2confmods) == snap1 + 1
conftest._getconftestmodules(basedir.join('b'))
assert len(conftest._path2confmods) == snap1 + 2
def test_value_access_not_existing(self, basedir):
conftest = ConftestWithSetinitial(basedir)
with pytest.raises(KeyError):
conftest._rget_with_confmod('a', basedir)
def test_value_access_by_path(self, basedir):
conftest = ConftestWithSetinitial(basedir)
adir = basedir.join("adir")
assert conftest._rget_with_confmod("a", adir)[1] == 1
assert conftest._rget_with_confmod("a", adir.join("b"))[1] == 1.5
def test_value_access_with_confmod(self, basedir):
startdir = basedir.join("adir", "b")
startdir.ensure("xx", dir=True)
conftest = ConftestWithSetinitial(startdir)
mod, value = conftest._rget_with_confmod("a", startdir)
assert value == 1.5
path = py.path.local(mod.__file__)
assert path.dirpath() == basedir.join("adir", "b")
assert path.purebasename.startswith("conftest")
def test_conftest_in_nonpkg_with_init(tmpdir):
tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3")
tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5")
tmpdir.ensure("adir-1.0/b/__init__.py")
tmpdir.ensure("adir-1.0/__init__.py")
ConftestWithSetinitial(tmpdir.join("adir-1.0", "b"))
def test_doubledash_considered(testdir):
conf = testdir.mkdir("--option")
conf.join("conftest.py").ensure()
conftest = PytestPluginManager()
conftest_setinitial(conftest, [conf.basename, conf.basename])
l = conftest._getconftestmodules(conf)
assert len(l) == 1
def test_issue151_load_all_conftests(testdir):
names = "code proj src".split()
for name in names:
p = testdir.mkdir(name)
p.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, names)
d = list(conftest._conftestpath2mod.values())
assert len(d) == len(names)
def test_conftest_global_import(testdir):
testdir.makeconftest("x=3")
p = testdir.makepyfile("""
import py, pytest
from _pytest.config import PytestPluginManager
conf = PytestPluginManager()
mod = conf._importconftest(py.path.local("conftest.py"))
assert mod.x == 3
import conftest
assert conftest is mod, (conftest, mod)
subconf = py.path.local().ensure("sub", "conftest.py")
subconf.write("y=4")
mod2 = conf._importconftest(subconf)
assert mod != mod2
assert mod2.y == 4
import conftest
assert conftest is mod2, (conftest, mod)
""")
res = testdir.runpython(p)
assert res.ret == 0
def test_conftestcutdir(testdir):
conf = testdir.makeconftest("")
p = testdir.mkdir("x")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p)
l = conftest._getconftestmodules(p)
assert len(l) == 0
l = conftest._getconftestmodules(conf.dirpath())
assert len(l) == 0
assert conf not in conftest._conftestpath2mod
# but we can still import a conftest directly
conftest._importconftest(conf)
l = conftest._getconftestmodules(conf.dirpath())
assert l[0].__file__.startswith(str(conf))
# and all sub paths get updated properly
l = conftest._getconftestmodules(p)
assert len(l) == 1
assert l[0].__file__.startswith(str(conf))
def test_conftestcutdir_inplace_considered(testdir):
conf = testdir.makeconftest("")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath())
l = conftest._getconftestmodules(conf.dirpath())
assert len(l) == 1
assert l[0].__file__.startswith(str(conf))
@pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split())
def test_setinitial_conftest_subdirs(testdir, name):
sub = testdir.mkdir(name)
subconftest = sub.ensure("conftest.py")
conftest = PytestPluginManager()
conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir)
if name not in ('whatever', '.dotdir'):
assert subconftest in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 1
else:
assert subconftest not in conftest._conftestpath2mod
assert len(conftest._conftestpath2mod) == 0
def test_conftest_confcutdir(testdir):
testdir.makeconftest("assert 0")
x = testdir.mkdir("x")
x.join("conftest.py").write(_pytest._code.Source("""
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""))
result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
result.stdout.fnmatch_lines(["*--xyz*"])
assert 'warning: could not load initial' not in result.stdout.str()
def test_no_conftest(testdir):
testdir.makeconftest("assert 0")
result = testdir.runpytest("--noconftest")
assert result.ret == EXIT_NOTESTSCOLLECTED
result = testdir.runpytest()
assert result.ret == EXIT_USAGEERROR
def test_conftest_existing_resultlog(testdir):
x = testdir.mkdir("tests")
x.join("conftest.py").write(_pytest._code.Source("""
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""))
testdir.makefile(ext=".log", result="") # Writes result.log
result = testdir.runpytest("-h", "--resultlog", "result.log")
result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_existing_junitxml(testdir):
x = testdir.mkdir("tests")
x.join("conftest.py").write(_pytest._code.Source("""
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true")
"""))
testdir.makefile(ext=".xml", junit="") # Writes junit.xml
result = testdir.runpytest("-h", "--junitxml", "junit.xml")
result.stdout.fnmatch_lines(["*--xyz*"])
def test_conftest_import_order(testdir, monkeypatch):
ct1 = testdir.makeconftest("")
sub = testdir.mkdir("sub")
ct2 = sub.join("conftest.py")
ct2.write("")
def impct(p):
return p
conftest = PytestPluginManager()
conftest._confcutdir = testdir.tmpdir
monkeypatch.setattr(conftest, '_importconftest', impct)
assert conftest._getconftestmodules(sub) == [ct1, ct2]
def test_fixture_dependency(testdir, monkeypatch):
ct1 = testdir.makeconftest("")
ct1 = testdir.makepyfile("__init__.py")
ct1.write("")
sub = testdir.mkdir("sub")
sub.join("__init__.py").write("")
sub.join("conftest.py").write(py.std.textwrap.dedent("""
import pytest
@pytest.fixture
def not_needed():
assert False, "Should not be called!"
@pytest.fixture
def foo():
assert False, "Should not be called!"
@pytest.fixture
def bar(foo):
return 'bar'
"""))
subsub = sub.mkdir("subsub")
subsub.join("__init__.py").write("")
subsub.join("test_bar.py").write(py.std.textwrap.dedent("""
import pytest
@pytest.fixture
def bar():
return 'sub bar'
def test_event_fixture(bar):
assert bar == 'sub bar'
"""))
result = testdir.runpytest("sub")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_conftest_found_with_double_dash(testdir):
sub = testdir.mkdir("sub")
sub.join("conftest.py").write(py.std.textwrap.dedent("""
def pytest_addoption(parser):
parser.addoption("--hello-world", action="store_true")
"""))
p = sub.join("test_hello.py")
p.write(py.std.textwrap.dedent("""
import pytest
def test_hello(found):
assert found == 1
"""))
result = testdir.runpytest(str(p) + "::test_hello", "-h")
result.stdout.fnmatch_lines("""
*--hello-world*
""")
class TestConftestVisibility(object):
def _setup_tree(self, testdir): # for issue616
# example mostly taken from:
# https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html
runner = testdir.mkdir("empty")
package = testdir.mkdir("package")
package.join("conftest.py").write(dedent("""\
import pytest
@pytest.fixture
def fxtr():
return "from-package"
"""))
package.join("test_pkgroot.py").write(dedent("""\
def test_pkgroot(fxtr):
assert fxtr == "from-package"
"""))
swc = package.mkdir("swc")
swc.join("__init__.py").ensure()
swc.join("conftest.py").write(dedent("""\
import pytest
@pytest.fixture
def fxtr():
return "from-swc"
"""))
swc.join("test_with_conftest.py").write(dedent("""\
def test_with_conftest(fxtr):
assert fxtr == "from-swc"
"""))
snc = package.mkdir("snc")
snc.join("__init__.py").ensure()
snc.join("test_no_conftest.py").write(dedent("""\
def test_no_conftest(fxtr):
assert fxtr == "from-package" # No local conftest.py, so should
# use value from parent dir's
"""))
print ("created directory structure:")
for x in testdir.tmpdir.visit():
print (" " + x.relto(testdir.tmpdir))
return {
"runner": runner,
"package": package,
"swc": swc,
"snc": snc}
# N.B.: "swc" stands for "subdir with conftest.py"
# "snc" stands for "subdir no [i.e. without] conftest.py"
@pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [
# Effective target: package/..
("runner", "..", 3),
("package", "..", 3),
("swc", "../..", 3),
("snc", "../..", 3),
# Effective target: package
("runner", "../package", 3),
("package", ".", 3),
("swc", "..", 3),
("snc", "..", 3),
# Effective target: package/swc
("runner", "../package/swc", 1),
("package", "./swc", 1),
("swc", ".", 1),
("snc", "../swc", 1),
# Effective target: package/snc
("runner", "../package/snc", 1),
("package", "./snc", 1),
("swc", "../snc", 1),
("snc", ".", 1),
])
@pytest.mark.issue616
def test_parsefactories_relative_node_ids(
self, testdir, chdir,testarg, expect_ntests_passed):
dirs = self._setup_tree(testdir)
print("pytest run in cwd: %s" %(
dirs[chdir].relto(testdir.tmpdir)))
print("pytestarg : %s" %(testarg))
print("expected pass : %s" %(expect_ntests_passed))
with dirs[chdir].as_cwd():
reprec = testdir.inline_run(testarg, "-q", "--traceconfig")
reprec.assertoutcome(passed=expect_ntests_passed)
@pytest.mark.parametrize('confcutdir,passed,error', [
('.', 2, 0),
('src', 1, 1),
(None, 1, 1),
])
def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error):
"""Test that conftest files are detected only up to a ini file, unless
an explicit --confcutdir option is given.
"""
root = testdir.tmpdir
src = root.join('src').ensure(dir=1)
src.join('pytest.ini').write('[pytest]')
src.join('conftest.py').write(_pytest._code.Source("""
import pytest
@pytest.fixture
def fix1(): pass
"""))
src.join('test_foo.py').write(_pytest._code.Source("""
def test_1(fix1):
pass
def test_2(out_of_reach):
pass
"""))
root.join('conftest.py').write(_pytest._code.Source("""
import pytest
@pytest.fixture
def out_of_reach(): pass
"""))
args = [str(src)]
if confcutdir:
args = ['--confcutdir=%s' % root.join(confcutdir)]
result = testdir.runpytest(*args)
match = ''
if passed:
match += '*%d passed*' % passed
if error:
match += '*%d error*' % error
result.stdout.fnmatch_lines(match)
def test_issue1073_conftest_special_objects(testdir):
testdir.makeconftest("""
class DontTouchMe(object):
def __getattr__(self, x):
raise Exception('cant touch me')
x = DontTouchMe()
""")
testdir.makepyfile("""
def test_some():
pass
""")
res = testdir.runpytest()
assert res.ret == 0
def test_conftest_exception_handling(testdir):
testdir.makeconftest('''
raise ValueError()
''')
testdir.makepyfile("""
def test_some():
pass
""")
res = testdir.runpytest()
assert res.ret == 4
assert 'raise ValueError()' in [line.strip() for line in res.errlines]
def test_hook_proxy(testdir):
"""Session's gethookproxy() would cache conftests incorrectly (#2016).
It was decided to remove the cache altogether.
"""
testdir.makepyfile(**{
'root/demo-0/test_foo1.py': "def test1(): pass",
'root/demo-a/test_foo2.py': "def test1(): pass",
'root/demo-a/conftest.py': """
def pytest_ignore_collect(path, config):
return True
""",
'root/demo-b/test_foo3.py': "def test1(): pass",
'root/demo-c/test_foo4.py': "def test1(): pass",
})
result = testdir.runpytest()
result.stdout.fnmatch_lines([
'*test_foo1.py*',
'*test_foo3.py*',
'*test_foo4.py*',
'*3 passed*',
])
def test_required_option_help(testdir):
testdir.makeconftest("assert 0")
x = testdir.mkdir("x")
x.join("conftest.py").write(_pytest._code.Source("""
def pytest_addoption(parser):
parser.addoption("--xyz", action="store_true", required=True)
"""))
result = testdir.runpytest("-h", x)
assert 'argument --xyz is required' not in result.stdout.str()
assert 'general:' in result.stdout.str()
|
flub/pytest
|
testing/test_conftest.py
|
Python
|
mit
| 15,885
|
[
"VisIt"
] |
252357138c5f5ea7dd8c6dd8c7caf3fff672f023e71cebffc60efb8fb3fe6d11
|
import ovito
from ovito.io import *
from ovito.vis import *
import os
import sys
print("Hello, this is OVITO %i.%i.%i" % ovito.version)
rs = RenderSettings(
filename = '/tmp/image.png',
size = (640,480),
background_color = (1.0, 1.0, 1.0)
)
rs.renderer.antialiasing = True
def main():
in_dir = sys.argv[1]
out_dir = sys.argv[2]
alphas = [0.5, 1.0, 2.0]
frames = [[],
[],
[]]
with open(os.path.join(in_dir, "desc.txt"), 'r') as f:
for line in f:
n, alpha, F0, cov = line.split()
frames[alphas.index(float(alpha))].append([float(F0), float(cov), int(n)])
if not os.path.exists(out_dir):
os.mkdir(out_dir)
node = ovito.dataset.selected_node
for i in range(len(alphas)):
conv = ""
a_dir = os.path.join(out_dir, "a%1.f" % alphas[i])
if not os.path.exists(a_dir):
os.mkdir(a_dir)
#sort by coverage
a_frames = sorted(frames[i], key=lambda x: x[1])
for frame, (F0, cov, n) in enumerate(a_frames):
if n:
xyz = os.path.join(in_dir, "all_xyz%d.xyz" % n)
if not node:
node = import_file(xyz,
columns=["Particle Type",
"Position.X",
"Position.Y",
"Position.Z"])
else:
node.source.load(xyz)
else:
node.source.load("null.xyz")
rs.filename = os.path.join(a_dir, "cluster_grid_img_%d.png" % frame)
ovito.dataset.viewports.active_vp.render(rs)
conv += "%d %d %g %g\n" % (frame, n, F0, cov)
with open(os.path.join(a_dir, "conv.txt"), 'w') as f:
f.write(conv)
if __name__ == "__main__":
main()
|
jorgehog/Deux-kMC
|
scripts/extraneighbor_clusters/ovitoviz.py
|
Python
|
gpl-3.0
| 1,939
|
[
"OVITO"
] |
1ebd87dbd20e45ec27757422e0823ec605def05ff98ade6f540752a36e648ac8
|
#!/usr/bin/env python
#
# Copyright 2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import extras_swig as extras
class test_noise_source(gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_001(self):
op = extras.noise_source_f32(0)
op.set_waveform("GAUSSIAN")
op.set_amplitude(10)
head = gr.head(gr.sizeof_float, 12)
dst = gr.vector_sink_f()
tb = gr.top_block()
tb.connect(op, head, dst)
tb.run()
# expected results for Gaussian with seed 0, ampl 10
expected_result = (-6.8885869979858398, 26.149959564208984,
20.575775146484375, -7.9340143203735352,
5.3359274864196777, -12.552099227905273,
6.333674430847168, -23.830753326416016,
-16.603046417236328, 2.9676761627197266,
1.2176077365875244, 15.100193977355957)
dst_data = dst.data ()
self.assertEqual (expected_result, dst_data)
if __name__ == '__main__':
gr_unittest.run(test_noise_source, "test_noise_source.xml")
|
levelrf/level_basestation
|
grextras/python/qa_noise_source.py
|
Python
|
gpl-3.0
| 1,991
|
[
"Gaussian"
] |
ad85d5fd220e1bdd0fe1b7d97e4ba1fa848a70af168029629a38a851323e7610
|
# Copyright (C) 2010 Matthew McGowan
#
# Authors:
# Matthew McGowan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import Gtk, Gdk
from gi.repository import GObject
from gi.repository import Pango
from softwarecenter.utils import normalize_package_description
from softwarecenter.ui.gtk3.drawing import color_to_hex
from softwarecenter.ui.gtk3.utils import point_in
_PS = Pango.SCALE
class _SpecialCasePreParsers(object):
def preparse(self, k, desc):
if k is None:
return desc
func_name = '_%s_preparser' % k.lower().replace('-', '_')
if not hasattr(self, func_name):
return desc
f = getattr(self, func_name)
return f(desc)
# special case pre-parsers
def _skype_preparser(self, desc):
return desc.replace('. *', '.\n*')
def _texlive_fonts_extra_preparser(self, desc):
return desc.replace(')\n', ').\n').replace('--\n', '--\n\n')
class EventHelper(dict):
# FIXME: workaround for broken event.copy()
class ButtonEvent(object):
def __init__(self, event):
self.x = event.x
self.y = event.y
self.type = event.type
self.button = event.button
VALID_KEYS = (
'event',
'layout',
'index',
'within-selection',
'drag-active',
'drag-context')
def __init__(self):
dict.__init__(self)
self.new_press(None, None, None, False)
return
def __setitem__(self, k, v):
if k not in EventHelper.VALID_KEYS:
raise KeyError('\"%s\" is not a valid key' % k)
return dict.__setitem__(self, k, v)
def new_press(self, event, layout, index, within_sel):
if event is None:
self['event'] = None
else:
# this should be simply event.copy() but that appears broken
# currently(?)
self['event'] = EventHelper.ButtonEvent(event)
self['layout'] = layout
self['index'] = index
self['within-selection'] = within_sel
self['drag-active'] = False
self['drag-context'] = None
return
class PangoLayoutProxy(object):
""" Because i couldn't figure out how to inherit from
pygi's Pango.Layout... """
def __init__(self, context):
self._layout = Pango.Layout.new(context)
return
def xy_to_index(self, x, y):
return self._layout.xy_to_index(x, y)
def index_to_pos(self, *args):
return self._layout.index_to_pos(*args)
# setter proxies
def set_attributes(self, attrs):
return self._layout.set_attributes(attrs)
def set_markup(self, markup):
return self._layout.set_markup(markup, -1)
def set_font_description(self, font_desc):
return self._layout.set_font_description(font_desc)
def set_wrap(self, wrap_mode):
return self._layout.set_wrap(wrap_mode)
def set_width(self, width):
return self._layout.set_width(width)
# getter proxies
def get_text(self):
return self._layout.get_text()
def get_pixel_extents(self):
return self._layout.get_pixel_extents()[1]
def get_cursor_pos(self, index):
return self._layout.get_cursor_pos(index)
def get_iter(self):
return self._layout.get_iter()
def get_extents(self):
return self._layout.get_extents()
class Layout(PangoLayoutProxy):
def __init__(self, widget, text=""):
PangoLayoutProxy.__init__(self, widget.get_pango_context())
self.widget = widget
self.length = 0
self.indent = 0
self.vspacing = None
self.is_bullet = False
self.index = 0
self.allocation = Gdk.Rectangle()
self._default_attrs = True
self.set_markup(text)
return
def __len__(self):
return self.length
def set_text(self, text):
PangoLayoutProxy.set_markup(self, text)
self.length = len(self.get_text())
def set_allocation(self, x, y, w, h):
a = self.allocation
a.x = x
a.y = y
a.width = w
a.height = h
return
def get_position(self):
return self.allocation.x, self.allocation.y
def cursor_up(self, cursor, target_x=-1):
layout = self.widget.order[cursor.paragraph]
pos = layout.index_to_pos(cursor.index)
x, y = pos.x, pos.y
if target_x >= 0:
x = target_x
y -= _PS*self.widget.line_height
return layout.xy_to_index(x, y), (x, y)
def cursor_down(self, cursor, target_x=-1):
layout = self.widget.order[cursor.paragraph]
pos = layout.index_to_pos(cursor.index)
x, y = pos.x, pos.y
if target_x >= 0:
x = target_x
y += _PS*self.widget.line_height
return layout.xy_to_index(x, y), (x, y)
def index_at(self, px, py):
#wa = self.widget.get_allocation()
x, y = self.get_position() # layout allocation
(_, index, k) = self.xy_to_index((px-x)*_PS, (py-y)*_PS)
return point_in(self.allocation, px, py), index + k
def reset_attrs(self):
#~ self.set_attributes(Pango.AttrList())
self.set_markup(self.get_text())
self._default_attrs = True
return
def highlight(self, start, end, bg, fg):
# FIXME: AttrBackground doesn't seem to be exposed by gi yet??
#~ attrs = Pango.AttrList()
#~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, start, end))
#~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, start, end))
#~ self.set_attributes(attrs)
# XXX: workaround
text = self.get_text()
new_text = text[:start] + '<span background="%s" foreground="%s">' % (bg, fg)
new_text += text[start:end]
new_text += '</span>' + text[end:]
self.set_markup(new_text)
self._default_attrs = False
return
def highlight_all(self, bg, fg):
# FIXME: AttrBackground doesn't seem to be exposed by gi yet??
#~ attrs = Pango.AttrList()
#~ attrs.insert(Pango.AttrBackground(bg.red, bg.green, bg.blue, 0, -1))
#~ attrs.insert(Pango.AttrForeground(fg.red, fg.green, fg.blue, 0, -1))
#~ self.set_attributes(attrs)
# XXX: workaround
text = self.get_text()
self.set_markup('<span background="%s" foreground="%s">%s</span>' % (bg, fg, text))
self._default_attrs = False
return
class Cursor(object):
WORD_TERMINATORS = (' ',) # empty space. suggestions recommended...
def __init__(self, parent):
self.parent = parent
self.index = 0
self.paragraph = 0
def is_min(self, cursor):
return self.get_position() <= cursor.get_position()
def is_max(self, cursor):
return self.get_position() >= cursor.get_position()
def switch(self, cursor):
this_pos = self.get_position()
other_pos = cursor.get_position()
self.set_position(*other_pos)
cursor.set_position(*this_pos)
return
def same_line(self, cursor):
return self.get_current_line()[0] == cursor.get_current_line()[0]
def get_current_line(self):
keep_going = True
i, it = self.index, self.parent.order[self.paragraph].get_iter()
ln = 0
while keep_going:
l = it.get_line()
ls = l.start_index
le = ls + l.length
if i >= ls and i <= le:
if not it.at_last_line():
le -= 1
return (self.paragraph, ln), (ls, le)
ln += 1
keep_going = it.next_line()
return None, None
def get_current_word(self):
keep_going = True
layout = self.parent.order[self.paragraph]
text = layout.get_text()
i, it = self.index, layout.get_iter()
start = 0
while keep_going:
j = it.get_index()
if j >= i and text[j] in self.WORD_TERMINATORS:
return self.paragraph, (start, j)
elif text[j] in self.WORD_TERMINATORS:
start = j+1
keep_going = it.next_char()
return self.paragraph, (start, len(layout))
def set_position(self, paragraph, index):
self.index = index
self.paragraph = paragraph
def get_position(self):
return self.paragraph, self.index
class PrimaryCursor(Cursor):
def __init__(self, parent):
Cursor.__init__(self, parent)
def __repr__(self):
return 'Cursor: '+str((self.paragraph, self.index))
def get_rectangle(self, layout, a):
if self.index < len(layout):
pos = layout.get_cursor_pos(self.index)[1]
else:
pos = layout.get_cursor_pos(len(layout))[1]
x = layout.allocation.x + pos.x/_PS
y = layout.allocation.y + pos.y/_PS
return x, y, 1, pos.height/_PS
def draw(self, cr, layout, a):
cr.set_source_rgb(0,0,0)
cr.rectangle(*self.get_rectangle(layout, a))
cr.fill()
return
def zero(self):
self.index = 0
self.paragraph = 0
class SelectionCursor(Cursor):
def __init__(self, cursor):
Cursor.__init__(self, cursor.parent)
self.cursor = cursor
self.target_x = None
self.target_x_indent = 0
self.restore_point = None
def __repr__(self):
return 'Selection: '+str(self.get_range())
def __nonzero__(self):
c = self.cursor
return (self.paragraph, self.index) != (c.paragraph, c.index)
@property
def min(self):
c = self.cursor
return min((self.paragraph, self.index), (c.paragraph, c.index))
@property
def max(self):
c = self.cursor
return max((self.paragraph, self.index), (c.paragraph, c.index))
def clear(self, key=None):
self.index = self.cursor.index
self.paragraph = self.cursor.paragraph
self.restore_point = None
if key not in (Gdk.KEY_uparrow, Gdk.KEY_downarrow):
self.target_x = None
self.target_x_indent = 0
def set_target_x(self, x, indent):
self.target_x = x
self.target_x_indent = indent
return
def get_range(self):
return self.min, self.max
def within_selection(self, pos):
l = list(self.get_range())
l.append(pos)
l.sort()
# sort the list, see if pos is in between the extents of the selection
# range, if it is, pos is within the selection
if pos in l:
return l.index(pos) == 1
return False
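# Illustrative sketch (not part of the original module) of the sort trick
# used by within_selection() above: append pos to [min, max], sort, and pos
# is inside the selection exactly when it lands in the middle slot.
# bounds = [(0, 2), (0, 10)]         # selection min/max as
# bounds.append((0, 5))              # (paragraph, index) tuples
# bounds.sort()
# assert bounds.index((0, 5)) == 1   # middle slot -> within the selection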
class TextBlock(Gtk.EventBox):
PAINT_PRIMARY_CURSOR = False
DEBUG_PAINT_BBOXES = False
BULLET_POINT = u' \u2022 '
def __init__(self):
Gtk.EventBox.__init__(self)
self.set_visible_window(False)
self.set_size_request(200, -1)
self.set_can_focus(True)
self.set_events(Gdk.EventMask.KEY_PRESS_MASK|
Gdk.EventMask.ENTER_NOTIFY_MASK|
Gdk.EventMask.LEAVE_NOTIFY_MASK|
Gdk.EventMask.BUTTON_RELEASE_MASK|
Gdk.EventMask.POINTER_MOTION_MASK)
self._is_new = False
self.order = []
self.cursor = cur = PrimaryCursor(self)
self.selection = sel = SelectionCursor(self.cursor)
self.clipboard = None
#~ event_helper = EventHelper()
self._update_cached_layouts()
self._test_layout = self.create_pango_layout('')
#self._xterm = Gdk.Cursor.new(Gdk.XTERM)
# popup menu and menuitem's
self.copy_menuitem = Gtk.ImageMenuItem.new_from_stock(
Gtk.STOCK_COPY, None)
self.select_all_menuitem = Gtk.ImageMenuItem.new_from_stock(
Gtk.STOCK_SELECT_ALL, None)
self.menu = Gtk.Menu()
self.menu.attach_to_widget(self, None)
self.menu.append(self.copy_menuitem)
self.menu.append(self.select_all_menuitem)
self.menu.show_all()
self.copy_menuitem.connect('select', self._menu_do_copy, sel)
self.select_all_menuitem.connect('select', self._menu_do_select_all, cur, sel)
#~ Gtk.drag_source_set(self, Gdk.ModifierType.BUTTON1_MASK,
#~ None, Gdk.DragAction.COPY)
#~ Gtk.drag_source_add_text_targets(self)
#~ self.connect('drag-begin', self._on_drag_begin)
#~ self.connect('drag-data-get', self._on_drag_data_get, sel)
event_helper = EventHelper()
self.connect('button-press-event', self._on_press, event_helper, cur, sel)
self.connect('button-release-event', self._on_release, event_helper, cur, sel)
self.connect('motion-notify-event', self._on_motion, event_helper, cur, sel)
self.connect('key-press-event', self._on_key_press, cur, sel)
self.connect('key-release-event', self._on_key_release, cur, sel)
self.connect('focus-in-event', self._on_focus_in)
self.connect('focus-out-event', self._on_focus_out)
self.connect("size-allocate", self.on_size_allocate)
self.connect('style-updated', self._on_style_updated)
return
def on_size_allocate(self, *args):
allocation = self.get_allocation()
width = allocation.width
x = y = 0
for layout in self.order:
layout.set_width(_PS*(width-layout.indent))
if layout.index > 0:
y += (layout.vspacing or self.line_height)
e = layout.get_pixel_extents()
if self.get_direction() != Gtk.TextDirection.RTL:
layout.set_allocation(e.x+layout.indent, y+e.y,
width-layout.indent, e.height)
else:
layout.set_allocation(x+width-e.x-e.width-layout.indent-1, y+e.y,
width-layout.indent, e.height)
y += e.y + e.height
return
# overrides
def do_get_request_mode(self):
return Gtk.SizeRequestMode.HEIGHT_FOR_WIDTH
def do_get_preferred_height_for_width(self, width):
height = 0
layout = self._test_layout
for l in self.order:
layout.set_text(l.get_text(), -1)
layout.set_width(_PS*(width-l.indent))
lh = layout.get_pixel_extents()[1].height
height += lh + (l.vspacing or self.line_height)
height = max(50, height)
return height, height
def do_draw(self, cr):
self.render(self, cr)
return
def _config_colors(self):
context = self.get_style_context()
context.save()
context.add_class(Gtk.STYLE_CLASS_HIGHLIGHT)
state = self.get_state_flags()
if self.has_focus():
state |= Gtk.StateFlags.FOCUSED
context.set_state(state)
self._bg = color_to_hex(context.get_background_color(state))
self._fg = color_to_hex(context.get_color(state))
context.restore()
return
def _on_style_updated(self, widget):
self._config_colors()
self._update_cached_layouts()
return
# def _on_drag_begin(self, widgets, context, event_helper):
# print 'drag: begin'
# return
def _on_drag_data_get(self, widget, context, selection, info, timestamp, sel):
# print 'drag: get data'
text = self.get_selected_text(sel)
selection.set_text(text, -1)
return
def _on_focus_in(self, widget, event):
self._config_colors()
return
def _on_focus_out(self, widget, event):
self._config_colors()
return
def _on_motion(self, widget, event, event_helper, cur, sel):
if not (event.state == Gdk.ModifierType.BUTTON1_MASK):# or not self.has_focus():
return
# check if we have moved enough to count as a drag
press = event_helper['event']
# mvo: how can this be?
if not press: return
start_x, start_y = int(press.x), int(press.y)
cur_x, cur_y = int(event.x), int(event.y)
if (not event_helper['drag-active'] and
self.drag_check_threshold(start_x, start_y, cur_x, cur_y)):
event_helper['drag-active'] = True
if not event_helper['drag-active']:
return
#~ if (event_helper['within-selection'] and
#~ not event_helper['drag-context']):
#~ target_list = Gtk.TargetList()
#~ target_list.add_text_targets(80)
#~ ctx = self.drag_begin(target_list, # target list
#~ Gdk.DragAction.COPY, # action
#~ 1, # initiating button
#~ event) # event
#~
#~ event_helper['drag-context'] = ctx
#~ return
for layout in self.order:
point_in, index = layout.index_at(cur_x, cur_y)
if point_in:
cur.set_position(layout.index, index)
self.queue_draw()
break
def _on_press(self, widget, event, event_helper, cur, sel):
if sel and not self.has_focus():
self.grab_focus()
return # spot the difference
if not self.has_focus():
self.grab_focus()
if event.button == 3:
self._button3_action(cur, sel, event)
return
elif event.button != 1:
return
for layout in self.order:
x, y = int(event.x), int(event.y)
point_in, index = layout.index_at(x, y)
if point_in:
within_sel = False
#~ within_sel = sel.within_selection((layout.index, index))
if not within_sel:
cur.set_position(layout.index, index)
sel.clear()
#~ event_helper.new_press(event.copy(), layout, index, within_sel)
event_helper.new_press(event, layout, index, within_sel)
break
return
def _on_release(self, widget, event, event_helper, cur, sel):
if not event_helper['event']: return
# check if a drag occurred
if event_helper['drag-active']:
# if so, do not handle release
return
# else, handle release, do click
cur.set_position(event_helper['layout'].index,
event_helper['index'])
sel.clear()
press = event_helper['event']
if (press.type == Gdk.EventType._2BUTTON_PRESS):
self._2click_select(cur, sel)
elif (press.type == Gdk.EventType._3BUTTON_PRESS):
self._3click_select(cur, sel)
self.queue_draw()
return
def _menu_do_copy(self, item, sel):
self._copy_text(sel)
def _menu_do_select_all(self, item, cur, sel):
self._select_all(cur, sel)
def _button3_action(self, cur, sel, event):
start, end = sel.get_range()
self.copy_menuitem.set_sensitive(True)
self.select_all_menuitem.set_sensitive(True)
if not sel:
self.copy_menuitem.set_sensitive(False)
elif start == (0, 0) and \
end == (len(self.order)-1, len(self.order[-1])):
self.select_all_menuitem.set_sensitive(False)
self.menu.popup(None, # parent_menu_shell,
None, # parent_menu_item,
None, # GtkMenuPositionFunc func,
None, # data,
event.button,
event.time)
return
def _on_key_press(self, widget, event, cur, sel):
kv = event.keyval
s, i = cur.paragraph, cur.index
handled_keys = True
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
shift = (event.state & Gdk.ModifierType.SHIFT_MASK) > 0
if not self.PAINT_PRIMARY_CURSOR and \
kv in (Gdk.KEY_Up, Gdk.KEY_Down) and not sel:
return False
if kv == Gdk.KEY_Tab:
handled_keys = False
elif kv == Gdk.KEY_Left:
if ctrl:
self._select_left_word(cur, sel, s, i)
else:
self._select_left(cur, sel, s, i, shift)
if shift:
layout = self._get_cursor_layout()
pos = layout.index_to_pos(cur.index)
sel.set_target_x(pos.x, layout.indent)
elif kv == Gdk.KEY_Right:
if ctrl:
self._select_right_word(cur, sel, s, i)
else:
self._select_right(cur, sel, s, i, shift)
if shift:
layout = self._get_cursor_layout()
pos = layout.index_to_pos(cur.index)
sel.set_target_x(pos.x, layout.indent)
elif kv == Gdk.KEY_Up:
if ctrl:
if i == 0:
if s > 0:
cur.paragraph -= 1
cur.set_position(cur.paragraph, 0)
elif sel and not shift:
cur.set_position(*sel.min)
else:
self._select_up(cur, sel)
elif kv == Gdk.KEY_Down:
if ctrl:
if i == len(self._get_layout(cur)):
if s+1 < len(self.order):
cur.paragraph += 1
i = len(self._get_layout(cur))
cur.set_position(cur.paragraph, i)
elif sel and not shift:
cur.set_position(*sel.max)
else:
self._select_down(cur, sel)
elif kv == Gdk.KEY_Home:
if shift:
self._select_home(cur, sel, self.order[cur.paragraph])
else:
cur.set_position(0, 0)
elif kv == Gdk.KEY_End:
if shift:
self._select_end(cur, sel, self.order[cur.paragraph])
else:
cur.paragraph = len(self.order)-1
cur.index = len(self._get_layout(cur))
else:
handled_keys = False
if not shift and handled_keys:
sel.clear(kv)
self.queue_draw()
return handled_keys
def _on_key_release(self, widget, event, cur, sel):
ctrl = (event.state & Gdk.ModifierType.CONTROL_MASK) > 0
if ctrl:
if event.keyval == Gdk.KEY_a:
self._select_all(cur, sel)
elif event.keyval == Gdk.KEY_c:
self._copy_text(sel)
self.queue_draw()
return
def _select_up(self, cur, sel):
#~ if sel and not cur.is_min(sel) and cur.same_line(sel):
#~ cur.switch(sel)
s = cur.paragraph
layout = self._get_layout(cur)
if sel.target_x:
x = sel.target_x
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
(_, j, k), (x, y) = layout.cursor_up(cur, x)
j += k
else:
(_, j, k), (x, y) = layout.cursor_up(cur)
j += k
sel.set_target_x(x, layout.indent)
if (s, j) != cur.get_position():
cur.set_position(s, j)
elif s > 0:
cur.paragraph = s-1
layout = self._get_layout(cur)
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
y = layout.get_extents()[0].height
(_, j, k) = layout.xy_to_index(x, y)
cur.set_position(s-1, j+k)
else:
return False
return True
def _select_down(self, cur, sel):
#~ if sel and not cur.is_max(sel) and cur.same_line(sel):
#~ cur.switch(sel)
s = cur.paragraph
layout = self._get_layout(cur)
if sel.target_x:
x = sel.target_x
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
(_, j, k), (x, y) = layout.cursor_down(cur, x)
j += k
else:
(_, j, k), (x, y) = layout.cursor_down(cur)
j += k
sel.set_target_x(x, layout.indent)
if (s, j) != cur.get_position():
cur.set_position(s, j)
elif s < len(self.order) - 1:
cur.paragraph = s+1
layout = self._get_layout(cur)
if sel.target_x_indent:
x += (sel.target_x_indent - layout.indent) * _PS
y = 0
(_, j, k) = layout.xy_to_index(x, y)
cur.set_position(s+1, j+k)
else:
return False
return True
def _2click_select(self, cursor, sel):
self._select_word(cursor, sel)
return
def _3click_select(self, cursor, sel):
# XXX:
# _select_line seems to expose the following Pango issue:
# (description.py:3892): Pango-CRITICAL **:
# pango_layout_line_unref: assertion `private->ref_count > 0'
# failed
# ... which can result in a segfault
#~ self._select_line(cursor, sel)
self._select_all(cursor, sel)
return
def _copy_text(self, sel):
text = self.get_selected_text(sel)
if not self.clipboard:
display = Gdk.Display.get_default()
selection = Gdk.Atom.intern("CLIPBOARD", False)
self.clipboard = Gtk.Clipboard.get_for_display(display, selection)
self.clipboard.clear()
self.clipboard.set_text(text.strip(), -1)
return
def _select_end(self, cur, sel, layout):
if not cur.is_max(sel):
cur.switch(sel)
n, r = cur.get_current_line()
cur_pos = cur.get_position()
if cur_pos == (len(self.order)-1, len(self.order[-1])): # absolute end
if sel.restore_point:
# reinstate restore point
cur.set_position(*sel.restore_point)
else:
# reselect the line end
n, r = sel.get_current_line()
cur.set_position(n[0], r[1])
elif cur_pos[1] == len(self.order[n[0]]): # para end
# select abs end
cur.set_position(len(self.order)-1, len(self.order[-1]))
elif cur_pos == (n[0], r[1]): # line end
# select para end
cur.set_position(n[0], len(self.order[n[0]]))
else: # not at any end, within line somewhere
# select line end
if sel:
sel.restore_point = cur_pos
cur.set_position(n[0], r[1])
return
def _select_home(self, cur, sel, layout):
if not cur.is_min(sel):
cur.switch(sel)
n, r = cur.get_current_line()
cur_pos = cur.get_position()
if cur_pos == (0, 0): # absolute home
if sel.restore_point:
cur.set_position(*sel.restore_point)
else:
n, r = sel.get_current_line()
cur.set_position(n[0], r[0])
elif cur_pos[1] == 0: # para home
cur.set_position(0,0)
elif cur_pos == (n[0], r[0]): # line home
cur.set_position(n[0], 0)
else: # not at any home, within line somewhere
if sel:
sel.restore_point = cur_pos
cur.set_position(n[0], r[0])
return
def _select_left(self, cur, sel, s, i, shift):
if not shift and not cur.is_min(sel):
cur.switch(sel)
return
if i > 0:
cur.set_position(s, i-1)
elif cur.paragraph > 0:
cur.paragraph -= 1
cur.set_position(s-1, len(self._get_layout(cur)))
return
def _select_right(self, cur, sel, s, i, shift):
if not shift and not cur.is_max(sel):
cur.switch(sel)
return
if i < len(self._get_layout(cur)):
cur.set_position(s, i+1)
elif s < len(self.order)-1:
cur.set_position(s+1, 0)
return
def _select_left_word(self, cur, sel, s, i):
if i > 0:
cur.index -= 1
elif s > 0:
cur.paragraph -= 1
cur.index = len(self._get_layout(cur))
paragraph, word = cur.get_current_word()
if not word: return
cur.set_position(paragraph, max(0, word[0]-1))
return
def _select_right_word(self, cur, sel, s, i):
ll = len(self._get_layout(cur))
if i < ll:
cur.index += 1
elif s+1 < len(self.order):
cur.paragraph += 1
cur.index = 0
paragraph, word = cur.get_current_word()
if not word: return
cur.set_position(paragraph, min(word[1]+1, ll))
return
def _select_word(self, cursor, sel):
paragraph, word = cursor.get_current_word()
if word:
cursor.set_position(paragraph, word[1]+1)
sel.set_position(paragraph, word[0])
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
return
def _select_line(self, cursor, sel):
n, r = self.cursor.get_current_line()
sel.set_position(n[0], r[0])
cursor.set_position(n[0], r[1])
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
return
def _select_all(self, cursor, sel):
layout = self.order[-1]
sel.set_position(0, 0)
cursor.set_position(layout.index, len(layout))
if self.get_direction() == Gtk.TextDirection.RTL:
cursor.switch(sel)
return
def _selection_copy(self, layout, sel, new_para=True):
i = layout.index
start, end = sel.get_range()
if new_para:
text = '\n\n'
else:
text = ''
if sel and i >= start[0] and i <= end[0]:
if i == start[0]:
if end[0] > i:
return text+layout.get_text()[start[1]: len(layout)]
else:
return text+layout.get_text()[start[1]: end[1]]
elif i == end[0]:
if start[0] < i:
return text+layout.get_text()[0: end[1]]
else:
return text+layout.get_text()[start[1]: end[1]]
else:
return text+layout.get_text()
return ''
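# Behaviour sketch (illustrative): for a selection spanning paragraphs
# 0..2, paragraph 0 contributes its text from start[1] to its end,
# paragraph 1 (fully inside) contributes its whole text, and paragraph 2
# contributes text up to end[1]; when new_para is True the fragment is
# prefixed with '\n\n' to keep paragraphs separated in the copied text.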
def _new_layout(self, text=''):
layout = Layout(self, text)
layout.set_wrap(Pango.WrapMode.WORD_CHAR)
return layout
def _update_cached_layouts(self):
self._bullet = self._new_layout()
self._bullet.set_markup(self.BULLET_POINT)
font_desc = Pango.FontDescription()
font_desc.set_weight(Pango.Weight.BOLD)
self._bullet.set_font_description(font_desc)
e = self._bullet.get_pixel_extents()
self.indent, self.line_height = e.width, e.height
return
def _selection_highlight(self, layout, sel, bg, fg):
i = layout.index
start, end = sel.get_range()
if sel and i >= start[0] and i <= end[0]:
if i == start[0]:
if end[0] > i:
layout.highlight(start[1], len(layout), bg, fg)
else:
layout.highlight(start[1], end[1], bg, fg)
elif i == end[0]:
if start[0] < i:
layout.highlight(0, end[1], bg, fg)
else:
layout.highlight(start[1], end[1], bg, fg)
else:
layout.highlight_all(bg, fg)
elif not layout._default_attrs:
layout.reset_attrs()
return
def _paint_bullet_point(self, cr, x, y):
# draw the layout
Gtk.render_layout(self.get_style_context(),
cr, # state
x, # x coord
y, # y coord
self._bullet._layout) # a Pango.Layout()
def _get_layout(self, cursor):
return self.order[cursor.paragraph]
def _get_cursor_layout(self):
return self.order[self.cursor.paragraph]
def _get_selection_layout(self):
return self.order[self.selection.paragraph]
def render(self, widget, cr):
if not self.order:
return
a = self.get_allocation()
for layout in self.order:
lx, ly = layout.get_position()
self._selection_highlight(layout,
self.selection,
self._bg, self._fg)
if layout.is_bullet:
if self.get_direction() != Gtk.TextDirection.RTL:
indent = layout.indent - self.indent
else:
indent = a.width - layout.indent
self._paint_bullet_point(cr, indent, ly)
if self.DEBUG_PAINT_BBOXES:
la = layout.allocation
cr.rectangle(la.x, la.y, la.width, la.height)
cr.set_source_rgb(1,0,0)
cr.stroke()
# draw the layout
Gtk.render_layout(self.get_style_context(),
cr,
lx, # x coord
ly, # y coord
layout._layout) # a Pango.Layout()
# draw the cursor
if self.PAINT_PRIMARY_CURSOR and self.has_focus():
self.cursor.draw(cr, self._get_layout(self.cursor), a)
return
def append_paragraph(self, p, vspacing=None):
l = self._new_layout()
l.index = len(self.order)
l.vspacing = vspacing
l.set_text(p)
self.order.append(l)
return
def append_bullet(self, point, indent_level, vspacing=None):
l = self._new_layout()
l.index = len(self.order)
l.indent = self.indent * (indent_level + 1)
l.vspacing = vspacing
l.is_bullet = True
l.set_text(point)
self.order.append(l)
return
def copy_clipboard(self):
self._copy_text(self.selection)
return
def get_selected_text(self, sel=None):
text = ''
if not sel:
sel = self.selection
for layout in self.order:
text += self._selection_copy(layout, sel, (layout.index > 0))
return text
def select_all(self):
self._select_all(self.cursor, self.selection)
self.queue_draw()
return
def finished(self):
self.queue_resize()
return
def clear(self, key=None):
self.cursor.zero()
self.selection.clear(key)
self.order = []
return
class AppDescription(Gtk.VBox):
TYPE_PARAGRAPH = 0
TYPE_BULLET = 1
_preparser = _SpecialCasePreParsers()
def __init__(self):
Gtk.VBox.__init__(self)
self.description = TextBlock()
self.pack_start(self.description, False, False, 0)
self._prev_type = None
return
def _part_is_bullet(self, part):
# normalize_description() ensures that we only have "* " bullets
i = part.find("* ")
return i > -1, i
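# Example (illustrative): for part = '  * Create and modify archives.',
# part.find('* ') returns 2, so this yields (True, 2); append_bullet()
# later slices point[indent_level+2:] to drop the leading '  * '.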
def _parse_desc(self, desc, pkgname):
""" Attempt to maintain original fixed width layout, while
reconstructing the description into text blocks
(either paragraphs or bullets) which are line-wrap friendly.
"""
# pre-parse description if a special case exists for the given pkgname
desc = self._preparser.preparse(pkgname, desc)
parts = normalize_package_description(desc).split('\n')
for part in parts:
if not part: continue
is_bullet, indent = self._part_is_bullet(part)
if is_bullet:
self.append_bullet(part, indent)
else:
self.append_paragraph(part)
self.description.finished()
return
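# Rough walk-through (hypothetical input): a description such as
#   'File-roller is an archive manager.\n* Create archives.\n* View files.'
# is normalized, split on newlines, and dispatched so that the first
# part becomes a paragraph and the two '* ' parts become bullets at
# indent level 0.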
def clear(self):
self.description.clear()
return
def append_paragraph(self, p):
vspacing = self.description.line_height
self.description.append_paragraph(p.strip(), vspacing)
self._prev_type = self.TYPE_PARAGRAPH
return
def append_bullet(self, point, indent_level):
if self._prev_type == self.TYPE_BULLET:
vspacing = int(0.4*self.description.line_height)
else:
vspacing = self.description.line_height
self.description.append_bullet(
point[indent_level+2:], indent_level, vspacing)
self._prev_type = self.TYPE_BULLET
return
def set_description(self, raw_desc, pkgname):
self.clear()
if type(raw_desc) == str:
encoded_desc = unicode(raw_desc, 'utf8').encode('utf8')
else:
encoded_desc = raw_desc.encode('utf8')
desc = GObject.markup_escape_text(encoded_desc)
self._parse_desc(desc, pkgname)
self.show_all()
return
# easy access to some TextBlock methods
def copy_clipboard(self):
return TextBlock.copy_clipboard(self.description)
def get_selected_text(self):
return TextBlock.get_selected_text(self.description)
def select_all(self):
return TextBlock.select_all(self.description)
def get_test_description_window():
EXAMPLE0 = """
p7zip is the Unix port of 7-Zip, a file archiver that archives with very high compression ratios.
p7zip-full provides:
- /usr/bin/7za a standalone version of the 7-zip tool that handles
7z archives (implementation of the LZMA compression algorithm) and some other formats.
- /usr/bin/7z not only does it handle 7z but also ZIP, Zip64, CAB, RAR, ARJ, GZIP,
BZIP2, TAR, CPIO, RPM, ISO and DEB archives. 7z compression is 30-50% better than ZIP compression.
p7zip provides 7zr, a light version of 7za, and p7zip a gzip like wrapper around 7zr.""".strip()
EXAMPLE1 = """Transmageddon supports almost any format as its input and can generate a very large host of output files. The goal of the application was to help people to create the files they need to be able to play on their mobile devices and for people not hugely experienced with multimedia to generate a multimedia file without having to resort to command line tools with ungainly syntaxes.
The currently supported codecs are:
* Containers:
- Ogg
- Matroska
- AVI
- MPEG TS
- flv
- QuickTime
- MPEG4
- 3GPP
- MXT
* Audio encoders:
- Vorbis
- FLAC
- MP3
- AAC
- AC3
- Speex
- Celt
* Video encoders:
- Theora
- Dirac
- H264
- MPEG2
- MPEG4/DivX5
- xvid
- DNxHD
It also provide the support for the GStreamer's plugins auto-search."""
EXAMPLE2 = """File-roller is an archive manager for the GNOME environment. It allows you to:
* Create and modify archives.
* View the content of an archive.
* View a file contained in an archive.
* Extract files from the archive.
File-roller supports the following formats:
* Tar (.tar) archives, including those compressed with
gzip (.tar.gz, .tgz), bzip (.tar.bz, .tbz), bzip2 (.tar.bz2, .tbz2),
compress (.tar.Z, .taz), lzip (.tar.lz, .tlz), lzop (.tar.lzo, .tzo),
lzma (.tar.lzma) and xz (.tar.xz)
* Zip archives (.zip)
* Jar archives (.jar, .ear, .war)
* 7z archives (.7z)
* iso9660 CD images (.iso)
* Lha archives (.lzh)
* Single files compressed with gzip (.gz), bzip (.bz), bzip2 (.bz2),
compress (.Z), lzip (.lz), lzop (.lzo), lzma (.lzma) and xz (.xz)
File-roller doesn't perform archive operations by itself, but relies on standard tools for this."""
EXAMPLE3 = """This package includes the following CTAN packages:
Asana-Math -- A font to typeset maths in Xe(La)TeX.
albertus --
allrunes -- Fonts and LaTeX package for almost all runes.
antiqua -- the URW Antiqua Condensed Font.
antp -- Antykwa Poltawskiego: a Type 1 family of Polish traditional type.
antt -- Antykwa Torunska: a Type 1 family of a Polish traditional type.
apl -- Fonts for typesetting APL programs.
ar -- Capital A and capital R ligature for Apsect Ratio.
archaic -- A collection of archaic fonts.
arev -- Fonts and LaTeX support files for Arev Sans.
ascii -- Support for IBM "standard ASCII" font.
astro -- Astronomical (planetary) symbols.
atqolive --
augie -- Calligraphic font for typesetting handwriting.
auncial-new -- Artificial Uncial font and LaTeX support macros.
aurical -- Calligraphic fonts for use with LaTeX in T1 encoding.
barcodes -- Fonts for making barcodes.
bayer -- Herbert Bayers Universal Font For Metafont.
bbding -- A symbol (dingbat) font and LaTeX macros for its use.
bbm -- "Blackboard-style" cm fonts.
bbm-macros -- LaTeX support for "blackboard-style" cm fonts.
bbold -- Sans serif blackboard bold.
belleek -- Free replacement for basic MathTime fonts.
bera -- Bera fonts.
blacklettert1 -- T1-encoded versions of Haralambous old German fonts.
boisik -- A font inspired by Baskerville design.
bookhands -- A collection of book-hand fonts.
braille -- Support for braille.
brushscr -- A handwriting script font.
calligra -- Calligraphic font.
carolmin-ps -- Adobe Type 1 format of Carolingian Minuscule fonts.
cherokee -- A font for the Cherokee script.
clarendo --
cm-lgc -- Type 1 CM-based fonts for Latin, Greek and Cyrillic.
cmbright -- Computer Modern Bright fonts.
cmll -- Symbols for linear logic.
cmpica -- A Computer Modern Pica variant.
coronet --
courier-scaled -- Provides a scaled Courier font.
cryst -- Font for graphical symbols used in crystallography.
cyklop -- The Cyclop typeface.
dancers -- Font for Conan Doyle's "The Dancing Men".
dice -- A font for die faces.
dictsym -- DictSym font and macro package
dingbat -- Two dingbat symbol fonts.
doublestroke -- Typeset mathematical double stroke symbols.
dozenal -- Typeset documents using base twelve numbering (also called
"dozenal")
duerer -- Computer Duerer fonts.
duerer-latex -- LaTeX support for the Duerer fonts.
ean -- Macros for making EAN barcodes.
ecc -- Sources for the European Concrete fonts.
eco -- Oldstyle numerals using EC fonts.
eiad -- Traditional style Irish fonts.
eiad-ltx -- LaTeX support for the eiad font.
elvish -- Fonts for typesetting Tolkien Elvish scripts.
epigrafica -- A Greek and Latin font.
epsdice -- A scalable dice "font".
esvect -- Vector arrows.
eulervm -- Euler virtual math fonts.
euxm --
feyn -- A font for in-text Feynman diagrams.
fge -- A font for Frege's Grundgesetze der Arithmetik.
foekfont -- The title font of the Mads Fok magazine.
fonetika -- Support for the danish "Dania" phonetic system.
fourier -- Using Utopia fonts in LaTeX documents.
fouriernc -- Use New Century Schoolbook text with Fourier maths fonts.
frcursive -- French cursive hand fonts.
garamond --
genealogy -- A compilation genealogy font.
gfsartemisia -- A modern Greek font design.
gfsbodoni -- A Greek and Latin font based on Bodoni.
gfscomplutum -- A Greek font with a long history.
gfsdidot -- A Greek font based on Didot's work.
gfsneohellenic -- A Greek font in the Neo-Hellenic style.
gfssolomos -- A Greek-alphabet font.
gothic -- A collection of old German-style fonts.
greenpoint -- The Green Point logo.
groff --
grotesq -- the URW Grotesk Bold Font.
hands -- Pointing hand font.
hfbright -- The hfbright fonts.
hfoldsty -- Old style numerals with EC fonts.
ifsym -- A collection of symbols.
inconsolata -- A monospaced font, with support files for use with TeX.
initials -- Adobe Type 1 decorative initial fonts.
iwona -- A two-element sans-serif font.
junicode -- A TrueType font for mediaevalists.
kixfont -- A font for KIX codes.
knuthotherfonts --
kpfonts -- A complete set of fonts for text and mathematics.
kurier -- A two-element sans-serif typeface.
lettrgth --
lfb -- A Greek font with normal and bold variants.
libertine -- Use the font Libertine with LaTeX.
libris -- Libris ADF fonts, with LaTeX support.
linearA -- Linear A script fonts.
logic -- A font for electronic logic design.
lxfonts -- Set of slide fonts based on CM.
ly1 -- Support for LY1 LaTeX encoding.
marigold --
mathabx -- Three series of mathematical symbols.
mathdesign -- Mathematical fonts to fit with particular text fonts.
mnsymbol -- Mathematical symbol font for Adobe MinionPro.
nkarta -- A "new" version of the karta cartographic fonts.
ocherokee -- LaTeX Support for the Cherokee language.
ogham -- Fonts for typesetting Ogham script.
oinuit -- LaTeX Support for the Inuktitut Language.
optima --
orkhun -- A font for orkhun script.
osmanian -- Osmanian font for writing Somali.
pacioli -- Fonts designed by Fra Luca de Pacioli in 1497.
pclnfss -- Font support for current PCL printers.
phaistos -- Disk of Phaistos font.
phonetic -- MetaFont Phonetic fonts, based on Computer Modern.
pigpen -- A font for the pigpen (or masonic) cipher.
psafm --
punk -- Donald Knuth's punk font.
recycle -- A font providing the "recyclable" logo.
sauter -- Wide range of design sizes for CM fonts.
sauterfonts -- Use sauter fonts in LaTeX.
semaphor -- Semaphore alphabet font.
simpsons -- MetaFont source for Simpsons characters.
skull -- A font to draw a skull.
staves -- Typeset Icelandic staves and runic letters.
tapir -- A simple geometrical font.
tengwarscript -- LaTeX support for using Tengwar fonts.
trajan -- Fonts from the Trajan column in Rome.
umtypewriter -- Fonts to typeset with the xgreek package.
univers --
universa -- Herbert Bayer's 'universal' font.
venturisadf -- Venturis ADF fonts collection.
wsuipa -- International Phonetic Alphabet fonts.
yfonts -- Support for old German fonts.
zefonts -- Virtual fonts to provide T1 encoding from existing fonts."""
EXAMPLE4 = """Arista is a simple multimedia transcoder, it focuses on being easy to use by making complex task of encoding for various devices simple.
Users should pick an input and a target device, choose a file to save to and go. Features:
* Presets for iPod, computer, DVD player, PSP, Playstation 3, and more.
* Live preview to see encoded quality.
* Automatically discover available DVD media and Video 4 Linux (v4l) devices.
* Rip straight from DVD media easily (requires libdvdcss).
* Rip straight from v4l devices.
* Simple terminal client for scripting.
* Automatic preset updating."""
def on_clicked(widget, desc_widget, descs):
widget.position += 1
if widget.position >= len(descs):
widget.position = 0
desc_widget.set_description(*descs[widget.position])
return
descs = ((EXAMPLE0,''),
(EXAMPLE1,''),
(EXAMPLE2,''),
(EXAMPLE3,'texlive-fonts-extra'),
(EXAMPLE4,''))
win = Gtk.Window()
win.set_default_size(300, 400)
win.set_has_resize_grip(True)
vb = Gtk.VBox()
win.add(vb)
b = Gtk.Button('Next test description >>')
b.position = 0
vb.pack_start(b, False, False, 0)
scroll = Gtk.ScrolledWindow()
vb.add(scroll)
d = AppDescription()
#~ d.description.DEBUG_PAINT_BBOXES = True
d.set_description(EXAMPLE0, pkgname='')
scroll.add_with_viewport(d)
win.show_all()
b.connect("clicked", on_clicked, d, descs)
win.connect('destroy',lambda x: Gtk.main_quit())
return win
if __name__ == '__main__':
win = get_test_description_window()
win.show_all()
Gtk.main()
|
armikhael/software-center
|
softwarecenter/ui/gtk3/widgets/description.py
|
Python
|
gpl-3.0
| 48,326
|
[
"ADF",
"DIRAC"
] |
f2e8e39242d76d04dc110e046d47f293e85ea80dc79d4b42cdc3763545923675
|
# -*- coding:utf-8 -*-
#
# This file contains a class and main function to convert giellatekno xml
# formatted files to pure text
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2013-2014 Børre Gaup <borre.gaup@uit.no>
#
from lxml import etree
import StringIO
import os
import sys
import argparse
import argparse_version
class XMLPrinter:
"""This is a class to convert giellatekno xml formatted files to plain text
"""
def __init__(self,
lang=None,
all_paragraphs=False,
title=False,
listitem=False,
table=False,
correction=False,
error=False,
errorort=False,
errorortreal=False,
errormorphsyn=False,
errorsyn=False,
errorlex=False,
errorlang=False,
foreign=False,
noforeign=False,
typos=False,
print_filename=False,
one_word_per_line=False,
disambiguation=False,
dependency=False,
hyph_replacement=''):
'''The handling of error* elements is governed by the error*,
noforeign, correction, typos and one_word_per_line arguments.
If one_word_per_line and typos are False and correction is True, the
content of the correct attribute should be printed instead of the
.text part of the error element.
If one_word_per_line or typos are True, the .text part, the correct
attribute and the other attributes of the error* element should be
printed out on one line.
If typos is True and some of the error* options are True, only the
error types whose options are True are printed.
If one_word_per_line is True and some of the error* options are True,
only those error elements get the error treatment; the other elements
are treated as plain elements.
If noforeign is True, neither the errorlang.text part nor the correct
attribute should be printed.
'''
self.paragraph = True
self.all_paragraphs = all_paragraphs
if title or listitem or table:
self.paragraph = False
self.title = title
self.listitem = listitem
self.table = table
self.correction = correction
self.error = error
self.errorort = errorort
self.errorortreal = errorortreal
self.errormorphsyn = errormorphsyn
self.errorsyn = errorsyn
self.errorlex = errorlex
self.errorlang = errorlang
self.noforeign = noforeign
if (error or
errorort or
errorortreal or
errormorphsyn or
errorsyn or
errorlex or
errorlang):
self.error_filtering = True
else:
self.error_filtering = False
self.typos = typos
self.print_filename = print_filename
if self.typos:
self.one_word_per_line = True
else:
self.one_word_per_line = one_word_per_line
if lang and lang.startswith('!'):
self.lang = lang[1:]
self.invert_lang = True
else:
self.lang = lang
self.invert_lang = False
self.disambiguation = disambiguation
self.dependency = dependency
if hyph_replacement == 'xml':
self.hyph_replacement = '<hyph/>'
else:
self.hyph_replacement = hyph_replacement
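# Quick reference for the flag interplay (restating the docstring,
# combinations illustrative):
#   correction=True, typos=False, one_word_per_line=False
#     -> error text is replaced inline by the 'correct' attribute
#   typos=True (forces one_word_per_line=True)
#     -> only error words are printed, tab-separated from the correction
#   one_word_per_line=True with e.g. errorort=True
#     -> only errorort elements get the error treatment; everything else
#        is printed one word per line as plain text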
def get_lang(self):
"""
Get the lang of the file
"""
return self.etree.getroot().\
attrib['{http://www.w3.org/XML/1998/namespace}lang']
def get_element_language(self, element, parentlang):
"""Get the language of element.
Elements inherit the parents language if not explicitely set
"""
if element.get('{http://www.w3.org/XML/1998/namespace}lang') is None:
return parentlang
else:
return element.get('{http://www.w3.org/XML/1998/namespace}lang')
def collect_not_inline_errors(self, element, textlist):
'''Add the formatted errors as strings to the textlist list
'''
error_string = self.error_not_inline(element)
if error_string != '':
textlist.append(error_string)
for child in element:
if self.visit_error_not_inline(child):
self.collect_not_inline_errors(child, textlist)
if not self.typos:
if element.tail is not None and element.tail.strip() != '':
if not self.one_word_per_line:
textlist.append(element.tail.strip())
else:
textlist.append('\n'.join(element.tail.strip().split()))
def error_not_inline(self, element):
'''Collect and format element.text, element.tail and
the attributes into the string text
Also scan the children if there is no error filtering or
if the element is filtered
'''
text = ''
if element.text is not None and element.text.strip() != '':
text = element.text.strip()
if not self.error_filtering or self.include_this_error(element):
for child in element:
if text != '':
text += ' '
if child.tag == 'span' and element.tag == 'errorsyn':
text += child.text
else:
try:
text += child.get('correct')
except TypeError:
print >>sys.stderr, 'Unexpected error element'
print >>sys.stderr, etree.tostring(child,
encoding='utf8')
print >>sys.stderr, 'To fix this error you must \
fix the errormarkup in the original document:'
print >>sys.stderr, self.filename
if child.tail is not None and child.tail.strip() != '':
text += u' {}'.format(child.tail.strip())
text += self.get_error_attributes(dict(element.attrib))
return text
def get_error_attributes(self, attributes):
'''Collect and format the attributes + the filename
into the string text.
'''
text = '\t'
text += attributes.get('correct')
del attributes['correct']
attr = []
for key in sorted(attributes):
attr.append(u'{}={}'.format(key,
unicode(attributes[key])))
if len(attr) > 0:
text += '\t#'
text += ','.join(attr)
if self.print_filename:
text += u', file: {}'.format(
os.path.basename(self.filename).decode('utf8'))
elif self.print_filename:
text += u'\t#file: {}'.format(
os.path.basename(self.filename).decode('utf8'))
return text
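# Example return value (attribute values invented): for an element with
# attrib {'correct': 'soahti', 'errtype': 'vowlat', 'pos': 'noun'} this
# returns '\tsoahti\t#errtype=vowlat,pos=noun'; with print_filename set,
# ', file: somefile.xml' is appended as well.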
def collect_inline_errors(self, element, textlist, parentlang):
'''Add the "correct" element to the list textlist
'''
if element.get('correct') is not None and not self.noforeign:
textlist.append(element.get('correct'))
self.get_tail(element, textlist, parentlang)
def collect_text(self, element, parentlang, buffer):
"""Collect text from element, and write the contents to buffer
"""
textlist = []
self.visit_nonerror_element(element, textlist, parentlang)
if len(textlist) > 0:
if not self.one_word_per_line:
buffer.write(' '.join(textlist).encode('utf8'))
buffer.write(' ¶\n')
else:
buffer.write('\n'.join(textlist).encode('utf8'))
buffer.write('\n')
def get_contents(self, elt_contents, textlist, elt_lang):
if elt_contents is not None:
text = elt_contents.strip()
if text != '' and (
self.lang is None or
(not self.invert_lang and elt_lang == self.lang) or
(self.invert_lang and elt_lang != self.lang)):
if not self.one_word_per_line:
textlist.append(text)
else:
textlist.append('\n'.join(text.split()))
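# Language filtering sketch (language codes invented): with lang='sme'
# and invert_lang=False only text whose element language is 'sme' is
# kept; passing '!sme' on the command line sets invert_lang=True in
# __init__, keeping everything except 'sme'; lang=None keeps all text.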
def get_text(self, element, textlist, parentlang):
'''Get the text part of an lxml element
'''
self.get_contents(element.text,
textlist,
self.get_element_language(element, parentlang))
def get_tail(self, element, textlist, parentlang):
'''Get the tail part of an lxml element
'''
self.get_contents(element.tail,
textlist,
parentlang)
def visit_children(self, element, textlist, parentlang):
"""Visit the children of element, adding their content to textlist
"""
for child in element:
if child.tag == 'errorlang' and self.noforeign and self.typos:
pass
elif child.tag == 'errorlang' and self.noforeign:
self.get_tail(child, textlist, parentlang)
elif self.visit_error_inline(child):
self.collect_inline_errors(
child,
textlist,
self.get_element_language(child, parentlang))
elif self.visit_error_not_inline(child):
self.collect_not_inline_errors(child, textlist)
else:
self.visit_nonerror_element(
child,
textlist,
self.get_element_language(element, parentlang))
def visit_nonerror_element(self, element, textlist, parentlang):
"""Visit and extract text from non error element
"""
if not self.typos:
self.get_text(element, textlist, parentlang)
self.visit_children(element, textlist, parentlang)
if not self.typos:
self.get_tail(element, textlist, parentlang)
def visit_this_node(self, element):
'''Return True if the element should be visited
'''
return (
self.all_paragraphs or
(
self.paragraph is True and (element.get('type') is None or
element.get('type') == 'text')
) or (
self.title is True and element.get('type') == 'title'
) or (
self.listitem is True and element.get('type') == 'listitem'
) or (
self.table is True and element.get('type') == 'tablecell'
)
)
def visit_error_not_inline(self, element):
"""Determine whether element should be visited
"""
return (
(element.tag.startswith('error') and self.one_word_per_line and
not self.error_filtering) or
self.include_this_error(element)
)
def visit_error_inline(self, element):
"""Determine whether element should be visited
"""
return (element.tag.startswith('error') and not
self.one_word_per_line and
(self.correction or self.include_this_error(element))
)
def include_this_error(self, element):
"""Determine whether element should be visited
"""
return self.error_filtering and (
(element.tag == 'error' and self.error) or
(element.tag == 'errorort' and self.errorort) or
(element.tag == 'errorortreal' and self.errorortreal) or
(element.tag == 'errormorphsyn' and self.errormorphsyn) or
(element.tag == 'errorsyn' and self.errorsyn) or
(element.tag == 'errorlex' and self.errorlex) or
(element.tag == 'errorlang' and self.errorlang) or
(element.tag == 'errorlang' and self.noforeign)
)
def parse_file(self, filename):
self.filename = filename
p = etree.XMLParser(huge_tree=True)
self.etree = etree.parse(filename, p)
def process_file(self):
"""Process the given file, adding the text into buffer
Returns the buffer
"""
buffer = StringIO.StringIO()
if self.hyph_replacement is not None:
self.handle_hyph()
if self.dependency:
self.print_element(self.etree.find('.//dependency'), buffer)
elif self.disambiguation:
self.print_element(self.etree.find('.//disambiguation'), buffer)
else:
for paragraph in self.etree.findall('.//p'):
if self.visit_this_node(paragraph):
self.collect_text(paragraph, self.get_lang(), buffer)
return buffer
def handle_hyph(self):
"""Replace hyph tags
"""
hyph_tails = []
for hyph in self.etree.findall('.//hyph'):
if hyph.tail is not None:
hyph_tails.append(hyph.tail)
if hyph.getnext() is None:
if hyph.getparent().text is not None:
hyph_tails.insert(0, hyph.getparent().text)
hyph.getparent().text = self.hyph_replacement.join(hyph_tails)
hyph_tails[:] = []
hyph.getparent().remove(hyph)
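# Worked example (illustrative): for <p>seam<hyph/>less</p> and
# hyph_replacement='-', the tail 'less' is collected, the parent text
# 'seam' is prepended, and the paragraph text becomes 'seam-less' once
# the hyph element is removed; hyph_replacement='xml' joins with a
# literal '<hyph/>' marker instead.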
def print_element(self, element, buffer):
if element is not None and element.text is not None:
buffer.write(element.text.encode('utf8'))
def print_file(self, file_):
'''Print an XML file to stdout'''
if file_.endswith('.xml'):
self.parse_file(file_)
sys.stdout.write(self.process_file().getvalue())
def parse_options():
"""Parse the options given to the program
"""
parser = argparse.ArgumentParser(
parents=[argparse_version.parser],
description='Print the contents of a corpus in XML format\n\
The default is to print paragraphs with no type (=text type).')
parser.add_argument('-l',
dest='lang',
help='Print only elements in language LANG. Default \
is all langs.')
parser.add_argument('-T',
dest='title',
action='store_true',
help='Print paragraphs with title type', )
parser.add_argument('-L',
dest='list',
action='store_true',
help='Print paragraphs with list type')
parser.add_argument('-t',
dest='table',
action='store_true',
help='Print paragraphs with table type')
parser.add_argument('-a',
dest='all_paragraphs',
action='store_true',
help='Print all text elements')
parser.add_argument('-c',
dest='corrections',
action='store_true',
help='Print corrected text instead of the original \
typos & errors')
parser.add_argument('-C',
dest='error',
action='store_true',
help='Only print unclassified (§/<error..>) \
corrections')
parser.add_argument('-ort',
dest='errorort',
action='store_true',
help='Only print orthographic, non-word \
($/<errorort..>) corrections')
parser.add_argument('-ortreal',
dest='errorortreal',
action='store_true',
help='Only print orthographic, real-word \
(¢/<errorortreal..>) corrections')
parser.add_argument('-morphsyn',
dest='errormorphsyn',
action='store_true',
help='Only print morphosyntactic \
(£/<errormorphsyn..>) corrections')
parser.add_argument('-syn',
dest='errorsyn',
action='store_true',
help='Only print syntactic (¥/<errorsyn..>) \
corrections')
parser.add_argument('-lex',
dest='errorlex',
action='store_true',
help='Only print lexical (€/<errorlex..>) \
corrections')
parser.add_argument('-foreign',
dest='errorlang',
action='store_true',
help='Only print foreign (∞/<errorlang..>) \
corrections')
parser.add_argument('-noforeign',
dest='noforeign',
action='store_true',
help='Do not print anything from foreign \
(∞/<errorlang..>) corrections')
parser.add_argument('-typos',
dest='typos',
action='store_true',
help='Print only the errors/typos in the text, with \
corrections tab-separated')
parser.add_argument('-f',
dest='print_filename',
action='store_true',
help='Add the source filename as a comment after each \
error word.')
parser.add_argument('-S',
dest='one_word_per_line',
action='store_true',
help='Print the whole text one word per line; \
typos have tab separated corrections')
parser.add_argument('-dis',
dest='disambiguation',
action='store_true',
help='Print the disambiguation element')
parser.add_argument('-dep',
dest='dependency',
action='store_true',
help='Print the dependency element')
parser.add_argument('-hyph',
dest='hyph_replacement',
default='',
help='Replace hyph tags with the given argument')
parser.add_argument('targets',
nargs='+',
help='Name of the files or directories to process. \
If a directory is given, all files in this directory \
and its subdirectories will be listed.')
args = parser.parse_args()
return args
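# Typical invocations (illustrative, file and language names invented):
#   ccat.py -a converted/file.xml          # print all text elements
#   ccat.py -l sme -T converted/           # print 'sme' titles in a dir
#   ccat.py -typos -f converted/file.xml   # typos + corrections + file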
def main():
"""Set up the XMLPrinter class with the given command line options and
process the given files and directories.
Print the output to stdout.
"""
args = parse_options()
xml_printer = XMLPrinter(lang=args.lang,
all_paragraphs=args.all_paragraphs,
title=args.title,
listitem=args.list,
table=args.table,
correction=args.corrections,
error=args.error,
errorort=args.errorort,
errorortreal=args.errorortreal,
errormorphsyn=args.errormorphsyn,
errorsyn=args.errorsyn,
errorlex=args.errorlex,
errorlang=args.errorlang,
noforeign=args.noforeign,
typos=args.typos,
print_filename=args.print_filename,
one_word_per_line=args.one_word_per_line,
dependency=args.dependency,
disambiguation=args.disambiguation,
hyph_replacement=args.hyph_replacement)
for target in args.targets:
if os.path.exists(target):
if os.path.isfile(target):
xml_printer.print_file(target)
elif os.path.isdir(target):
for root, _, files in os.walk(target):
for xml_file in files:
xml_printer.print_file(os.path.join(root, xml_file))
else:
print >>sys.stderr, '{} does not exist'.format(target)
if __name__ == '__main__':
main()
|
unhammer/gt-CorpusTools
|
corpustools/ccat.py
|
Python
|
gpl-3.0
| 21,390
|
[
"VisIt"
] |
ad2d00e9464eba3d1baac85d502aba061488cd42ee5cbe964752c535c557ae1a
|
from __future__ import print_function
import sys
import os
from scipy.io import netcdf as nc
import numpy as np
import hashlib
import pytest
@pytest.mark.usefixtures('prepare_to_test')
class TestDiagnosticOutput:
def test_coverage(self, exp):
"""
Test that all available diagnostics can be dumped.
"""
# Check that none of the experiment's unfinished diags have been
# implemented; if any have, the unfinished_diags list should be updated.
assert(not any([os.path.exists(d.output) for d in exp.get_unfinished_diags()]))
# Check that diags that should have been written out are.
assert(len(exp.get_available_diags()) > 0)
assert(all([os.path.exists(d.output) for d in exp.get_available_diags()]))
def test_valid(self, exp):
"""
Check that all output diagnostics are valid.
Validity checks:
- contain the expected variable
- the variable contains data
- that data doesn't contain NaNs.
"""
for d in exp.get_available_diags():
with nc.netcdf_file(d.output) as f:
assert(d.name in f.variables.keys())
data = f.variables[d.name][:].copy()
assert(len(data) > 0)
if hasattr(data, 'mask'):
assert(not data.mask.all())
assert(not np.isnan(np.sum(data)))
def test_checksums(self, exp):
"""
Test that checksums of diagnostic output are the same
as a baseline.
Note that diagnostic output needs to be in netCDF3 format for this
checksum to be reproducible.
"""
checksum_file = os.path.join(exp.path, 'diag_checksums.txt')
tmp_file = os.path.join(exp.path, 'tmp_diag_checksums.txt')
new_checksums = ''
for d in exp.get_available_diags():
with open(d.output, 'rb') as f:
checksum = hashlib.md5(f.read()).hexdigest()
new_checksums += '{}:{}\n'.format(os.path.basename(d.output),
checksum)
# Read in the baseline and check against calculated.
with open(checksum_file) as f:
baseline = f.read()
if baseline != new_checksums:
with open(tmp_file, 'w') as f:
f.write(new_checksums)
print('Error: diagnostic checksums do not match.',
file=sys.stderr)
print('Compare {} and {}'.format(checksum_file, tmp_file),
file=sys.stderr)
print('If the difference is expected then' \
' update {}'.format(checksum_file), file=sys.stderr)
assert(baseline == new_checksums)
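# The baseline file is expected to hold one 'basename:md5' line per
# diagnostic, e.g. (file name and hash invented for illustration):
#   ocean_sfc.nc:9e107d9d372bb6826bd81d3542a419d6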
|
aidanheerdegen/MOM6-examples
|
tools/tests/test_diagnostic_output.py
|
Python
|
gpl-3.0
| 2,751
|
[
"NetCDF"
] |
ccbba5d69cd7c0ab0c303fba049d1165b1a9d1eb84a0e14a7a8dda9f15de771f
|
#!/usr/bin/env python
__author__ = 'Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de>'
import os.path
import re
import clang.cindex as ci
RULE_NAMING_CONSTANT = 'naming.constant'
RULE_NAMING_STRUCT = 'naming.struct'
RULE_NAMING_UNION = 'naming.union'
RULE_NAMING_CLASS = 'naming.class'
RULE_NAMING_ENUM = 'naming.enum'
RULE_NAMING_FIELD = 'naming.field'
RULE_NAMING_ENUM_CONSTANT = 'naming.enum_constant'
RULE_NAMING_VARIABLE = 'naming.variable'
RULE_NAMING_FUNCTION = 'naming.function'
RULE_NAMING_PARAMETER = 'naming.parameter'
RULE_NAMING_CXX_METHOD = 'naming.method'
RULE_NAMING_TYPEDEF = 'naming.typedef'
RULE_NAMING_TPL_NON_TYPE_PARAMETER = 'naming.tpl_nontype_param'
RULE_NAMING_TPL_TYPE_PARAMETER = 'naming.tpl_type_param'
RULE_NAMING_TPL_TPL_PARAMETER = 'naming.tpl_tpl_param'
RULE_NAMING_FUNCTION_TPL = 'naming.function_tpl'
RULE_NAMING_CLASS_TPL = 'naming.class_tpl'
RULE_NAMING_CLASS_TPL_SPEC = 'naming.class_tpl_spec'
RE_CONSTANT = r'^[A-Z]([_A-Z0-9])*_*$'
RE_VARIABLE = r'^_?[a-z]([_a-zA-Z0-9])*_*$'
RE_FUNCTION = r'operator.*|^_?[a-z]([_a-zA-Z0-9])*\(.*\)_*$'
RE_TYPE = r'^[A-Z]([a-zA-Z0-9])*_*$'
RE_TYPE_TEMPLATE = r'^[A-Z]([a-zA-Z0-9])*_*<.*>$'
RE_STRUCT = r'^[A-Z]([a-zA-Z0-9])*_*(<.*>)?$'
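# Examples of names the patterns accept (illustrative, not exhaustive):
# RE_CONSTANT matches 'MAX_VALUE'; RE_VARIABLE matches 'itemCount' and
# '_buffer'; RE_FUNCTION matches 'appendValue(int)' and 'operator<<';
# RE_TYPE matches 'StringSet'; RE_TYPE_TEMPLATE matches 'String<char>';
# RE_STRUCT matches both 'Finder' and 'Finder<TNeedle>'.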
RULE_TEXTS = {
RULE_NAMING_CONSTANT: 'Constant names must be all upper-case, separated by underscores.',
RULE_NAMING_STRUCT: 'Struct names must be all camel case, starting with upper case.',
RULE_NAMING_UNION: 'Union names must be all camel case, starting with upper case.',
RULE_NAMING_CLASS: 'Class names must be all camel case, starting with upper case.',
RULE_NAMING_ENUM: 'Enum names must be all camel case, starting with upper case.',
RULE_NAMING_FIELD: 'Field names must be camel case, starting with lower case.',
RULE_NAMING_ENUM_CONSTANT: 'Enum constant names must be all upper-case, separated by underscores.',
RULE_NAMING_VARIABLE: 'Variable names must be camel case, starting with lower case.',
RULE_NAMING_FUNCTION: 'Function names must be camel case, starting with lower case.',
RULE_NAMING_PARAMETER: 'Parameter names must be camel case, starting with lower case.',
RULE_NAMING_CXX_METHOD: 'Method names must be camel case, starting with lower case.',
RULE_NAMING_TYPEDEF: 'Typedef names must be all camel case, starting with upper case.',
RULE_NAMING_TPL_NON_TYPE_PARAMETER: 'Template non-type parameters must be all upper-case, separated by underscores.',
RULE_NAMING_TPL_TYPE_PARAMETER: 'Template type parameter names must be all camel case, starting with upper case.',
RULE_NAMING_TPL_TPL_PARAMETER: 'Template template parameter names must be all camel case, starting with upper case.',
RULE_NAMING_FUNCTION_TPL: 'Function template names must be camel case, starting with lower case.',
RULE_NAMING_CLASS_TPL: 'Class template names must be all camel case, starting with upper case.',
RULE_NAMING_CLASS_TPL_SPEC: 'Partial specialization names must be all camel case, starting with upper case.',
}
class RuleViolation(object):
def __init__(self, rule_id, violator, file, line, column):
self.rule_id = rule_id
self.violator = violator
self.file = file
self.line = line
self.column = column
def key(self):
return (self.rule_id, self.line, self.column, self.violator)
def __str__(self):
msg = '%s [%s:%d/%d] "%s": %s'
return msg % (self.rule_id, self.file, self.line, self.column,
self.violator, RULE_TEXTS[self.rule_id])
def _hasFileLocation(node):
"""Return True if node has a file lcoation."""
if not hasattr(node, 'location'):
return False
if not hasattr(node.location, 'file'):
return False
if not node.location.file:
return False
if not hasattr(node.location.file, 'name'):
return False
if not node.location.file.name:
return False
return True
class AllYesRule(object):
"""A rule that allows all visiting and checks always evaluate to True."""
def allowVisit(self, node):
return True
def allowRecurse(self, node):
return True
def check(self, node):
return []
class GenericSymbolNameRule(AllYesRule):
def __init__(self, kind, regular_ex, rule_name):
self.kind = kind
self.regular_ex = regular_ex
self.rule_name = rule_name
def allowVisit(self, node):
if not _hasFileLocation(node):
return False
displayname = ci.Cursor_displayname(node)
if not displayname:
return False # Ignore empty symbols.
# print 'allow visit template type?', displayname, node.kind
if node.kind == self.kind:
return True
return False
def check(self, node):
displayname = ci.Cursor_displayname(node)
if not re.match(self.regular_ex, displayname):
v = RuleViolation(
self.rule_name, displayname, node.location.file.name,
node.location.line, node.location.column)
return [v]
return []
class InIncludeDirsRule(AllYesRule):
"""Rule to block visiting and recursion outside include dirs."""
def __init__(self, include_dirs):
self.include_dirs = [os.path.abspath(x) for x in include_dirs]
self.cache = {}
def allowVisit(self, node):
"""Return True if visiting is allowed."""
if node.kind == ci.CursorKind.TRANSLATION_UNIT:
return True
if not _hasFileLocation(node):
return False
if self.cache.has_key(node.location.file.name):
return self.cache[node.location.file.name]
# Check whether node's location is below the include directories.
filename = os.path.abspath(node.location.file.name)
result = False
for x in self.include_dirs:
if filename.startswith(x):
# print filename, x
result = True
break
self.cache[node.location.file.name] = result
return result
def allowRecurse(self, node):
"""Return True if we want to recurse below node."""
return self.allowVisit(node)
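# Illustrative wiring (hypothetical driver code, not part of this
# module): a checker walking a translation unit would combine rules like
#   rules = [InIncludeDirsRule(['include/seqan']),
#            GenericSymbolNameRule(ci.CursorKind.CLASS_DECL, RE_TYPE,
#                                  RULE_NAMING_CLASS)]
# and report the RuleViolation objects returned by each rule's check().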
|
bkahlert/seqan-research
|
raw/workshop11/workshop2011-data-20110925/trunk/util/py_lib/seqan/pyclangcheck/rules.py
|
Python
|
mit
| 6,359
|
[
"VisIt"
] |
2f0c0907c0fb35aa04fc7b694cba0aa66331340ada32ae5fc2fcfc3649c4776e
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkExtractSelectedGraph(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkExtractSelectedGraph(), 'Processing.',
('vtkAbstractGraph', 'vtkSelection'), ('vtkGraph',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkExtractSelectedGraph.py
|
Python
|
bsd-3-clause
| 518
|
[
"VTK"
] |
c78d1fee237f12b3a15f687d3a84e57e667af6ca2b0553ed8d92033594a5f370
|
# gcompris - world_explore_template.py
#
# Copyright (C) 2012 Beth Hadley
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# world_explore_template
'''
HOW-TO USE THIS TEMPLATE:
Please visit http://gcompris.net/wiki/Adding_an_explore_activity#Instructions_to_Develop_an_Explore_Activity
'''
import gobject
import gtk
import gtk.gdk
import gcompris
import gcompris.utils
import gcompris.skin
import goocanvas
import pango
import ConfigParser
import gcompris.sound
import gcompris.bonus
from gcompris import gcompris_gettext as _
from random import randint
import random
TEXT_BG_COLOR = 0xCCCCCC99L
class Gcompris_explore:
def __init__(self, gcomprisBoard):
self.gcomprisBoard = gcomprisBoard
self.gcomprisBoard.level = 1
self.gcomprisBoard.maxlevel = 2
# Needed to get key_press
gcomprisBoard.disable_im_context = True
self.activityDataFilePath = '/' + self.gcomprisBoard.name + '/'
self.numLocations = 0 # the total number of locations in this activity
self.remainingItems = [] # list of all items still to be played during level 2 and 3
self.allSoundClips = [] # list of sounds to be played, extracted from content.desktop.in
self.allTextPrompts = [] # list of text prompts to be shown, extracted from content.desktop.in
self.locationSeen = 0
self.progressBar = None
self.next_action = None
self.first_run = True
def start(self):
'''
method called to create 'home-page', the world map with all the locations.
This method is re-called whenever the 'Go Back To Map' button is pressed
by any of the location pages.
'''
gcompris.set_default_background(self.gcomprisBoard.canvas.get_root_item())
# suspend system sound
self.saved_policy = gcompris.sound.policy_get()
gcompris.sound.policy_set(gcompris.sound.PLAY_AND_INTERRUPT)
gcompris.sound.pause()
self.display_level(self.gcomprisBoard.level)
if self.gcomprisBoard.mode == "audio" \
and not (gcompris.get_properties().fx):
gcompris.utils.dialog(_("Error: This activity cannot be \
played with the\nsound effects disabled.\nGo to the configuration \
dialogue to\nenable the sound."), None)
def display_level(self, x=None, y=None, z=None):
# Create a rootitem.
if hasattr(self, 'rootitem'):
self.rootitem.remove()
self.rootitem = goocanvas.Group(parent=
self.gcomprisBoard.canvas.get_root_item())
# silence any currently playing music
if not self.first_run:
gcompris.sound.play_ogg('boards/sounds/silence1s.ogg')
self.first_run = False
level = self.gcomprisBoard.level
# set the game bar in the bottom left
gcompris.bar_set(gcompris.BAR_LEVEL)
gcompris.bar_set_level(self.gcomprisBoard)
gcompris.bar_location(20, -1, 0.6)
self.locationSeen = 0
# -------------------------------------------------------------
# Load Background Image
# -------------------------------------------------------------
if not hasattr(self, 'data'):
self.read_data() # read in the data from content.desktop.in file
# only enable the sound-matching level (making 3 levels in all) if the content file has the tag 'SoundMatchingGameText'
if hasattr(self, 'SoundMatchingGameText'):
self.gcomprisBoard.maxlevel = 3
self.svghandle = gcompris.utils.load_svg(self.activityDataFilePath + self.background)
goocanvas.Svg(
parent = self.rootitem,
svg_handle = self.svghandle,
svg_id = self.backSvgId,
pointer_events = goocanvas.EVENTS_NONE
)
self.drawLocations()
if level == 1:
self.writeText(self.generalText)
else:
# prepare game for play
self.progressBar = ProgressBar( self.rootitem,
200, 480, 400, 25,
len(self.data.sections()) - 1 )
if level == 2 and self.gcomprisBoard.maxlevel == 3:
self.remainingItems = self.allSoundClips[:]
self.writeText(self.SoundMatchingGameText)
# PLAY BUTTON
self.playButton = goocanvas.Image(
parent=self.rootitem,
pixbuf=gcompris.utils.load_pixmap('explore/playbutton.png'),
x=65,
y=110,
)
self.playButton.connect("button_press_event", self.playCurrentMusicSelection)
self.writeText(_('Click to play sound'), 100, 70)
gcompris.utils.item_focus_init(self.playButton, None)
self.playRandom()
elif level == 3 or level == 2:
self.remainingItems = self.allTextPrompts[:]
self.writeText(self.TextMatchingGameText)
self.playRandom()
def next_level(self):
if self.gcomprisBoard.level == self.gcomprisBoard.maxlevel:
self.set_level( 1 )
else:
self.set_level( self.gcomprisBoard.level + 1 )
def writeText(self, txt, x=100, y=250, width=150):
'''
write a text box with a background rectangle to the game;
the group holding it is returned and must be removed by the caller
'''
# A group that will hold the text description and the background
textrootitem = goocanvas.Group(parent=
self.rootitem)
t = goocanvas.Text(
parent = textrootitem,
x=x,
y=y,
width=width,
font = gcompris.skin.get_font("gcompris/board/medium"),
text = txt,
anchor=gtk.ANCHOR_CENTER,
alignment=pango.ALIGN_CENTER,
use_markup=True
)
TG = 10
bounds = t.get_bounds()
rect = goocanvas.Rect(parent = textrootitem,
x=bounds.x1 - TG,
y=bounds.y1 - TG,
width=bounds.x2 - bounds.x1 + TG * 2,
height=bounds.y2 - bounds.y1 + TG * 2,
line_width=2.0,
radius_x = 3.0,
radius_y = 3.0,
fill_color_rgba = TEXT_BG_COLOR,
stroke_color = "black")
t.raise_(rect)
return textrootitem
def drawLocations(self):
'''
draw one item on the map for each section in content.desktop.in,
using the SVG element named by that section's 'svgId' key.
'''
if self.gcomprisBoard.level == 1:
method = self.goto_location
else:
method = self.checkAnswer
for section in self.sectionNames:
item = goocanvas.Svg(
parent = self.rootitem,
svg_handle = self.svghandle,
svg_id = self.data.get(section, 'svgId'),
)
gcompris.utils.item_focus_init(item, None)
item.set_data('sectionNum', section)
item.set_data('seen', False)
# Set the proper callback depending on the level
item.connect("button_press_event", method)
def location_quit(self, widget=None, target=None, event=None, location_rootitem=None):
'''
called when the user clicks 'Back to Homepage' in the location panel;
the main display is shown again and the location panel is removed.
'''
self.rootitem.props.visibility = goocanvas.ITEM_VISIBLE
location_rootitem.remove()
# silence any currently playing music
gcompris.sound.play_ogg('boards/sounds/silence1s.ogg')
# All the items have been seen, let's start the level 2
if self.locationSeen == len(self.sectionNames):
self.next_action = self.next_level
gcompris.bonus.display(gcompris.bonus.WIN, gcompris.bonus.SMILEY)
def goto_location(self, widget=None, target=None, event=None):
'''
method called when the student clicks on one of the location ellipses.
It loads the location page, including the text, picture, music, and question.
'''
bounds = target.get_bounds()
seen = target.get_data('seen')
if not seen:
target.set_data('seen', True)
self.locationSeen += 1
pixmap = gcompris.utils.load_pixmap('explore/star.png')
goocanvas.Image(parent = self.rootitem,
x = bounds.x1 + \
(bounds.x2 - bounds.x1) / 2.0 - \
pixmap.get_width() / 2.0,
y = bounds.y2 - pixmap.get_height() / 2.0,
pixbuf = pixmap
)
self.rootitem.props.visibility = goocanvas.ITEM_INVISIBLE
if hasattr(self, 'location_rootitem'):
self.location_rootitem.remove()
self.location_rootitem = goocanvas.Group(parent=
self.gcomprisBoard.canvas.get_root_item())
sectionNum = target.get_data('sectionNum')
goocanvas.Image(parent=self.location_rootitem, x=10, y=10,
pixbuf=gcompris.utils.load_pixmap('explore/border.png'))
# draw back button
txt = _('Back to Homepage')
self.backButton = goocanvas.Text(
parent=self.location_rootitem,
x=400,
y=495,
text = txt,
font = gcompris.skin.get_font("gcompris/board/medium bold"),
anchor=gtk.ANCHOR_CENTER,
alignment=pango.ALIGN_CENTER,
use_markup=True
)
self.backButton.connect("button_press_event", self.location_quit, self.location_rootitem)
gcompris.utils.item_focus_init(self.backButton, None)
# ---------------------------------------------------------------------
# WRITE LOCATION-SPECIFIC CONTENT TO PAGE
# ---------------------------------------------------------------------
name = _(self.data.get(sectionNum, '_title'))
goocanvas.Text(
parent=self.location_rootitem,
x=410,
y=50,
text = name,
font = gcompris.skin.get_font("gcompris/board/big bold"),
fill_color="black",
anchor=gtk.ANCHOR_CENTER,
alignment=pango.ALIGN_CENTER,
use_markup=True
)
text = _(self.data.get(sectionNum, '_text'))
t = goocanvas.Text(
parent=self.location_rootitem,
x=170,
y=120,
width=240,
text=_(text),
font = gcompris.skin.get_font("gcompris/board/medium"),
fill_color="black",
anchor=gtk.ANCHOR_N,
alignment=pango.ALIGN_CENTER
)
image = self.data.get(sectionNum, 'image')
goocanvas.Image(
parent=self.location_rootitem,
x=300,
y=120,
pixbuf=gcompris.utils.load_pixmap(self.activityDataFilePath + image)
)
try:
music = str(self.data.get(sectionNum, 'music'))
gcompris.sound.play_ogg(self.activityDataFilePath + music)
except: pass
def checkAnswer(self, widget=None, target=None, event=None):
'''
check to see if the student pressed the correct answer.
'''
if target.get_data('sectionNum') == self.currentSelection[1] and \
self.currentSelection in self.remainingItems:
self.remainingItems.remove(self.currentSelection)
self.progressBar.success()
if len(self.remainingItems):
self.next_action = self.playRandom
else:
self.next_action = self.next_level
gcompris.bonus.display(gcompris.bonus.WIN, gcompris.bonus.SMILEY)
else:
gcompris.bonus.display(gcompris.bonus.LOOSE, gcompris.bonus.SMILEY)
def playRandom(self):
''' call playRandomSong or playRandomText depending on the current
level '''
level = self.gcomprisBoard.level
if level == 2 and self.gcomprisBoard.maxlevel == 3:
self.playRandomSong()
elif level == 3 or level == 2:
self.playRandomText()
def playRandomSong(self):
'''
play a random sound clip for use in the second level
'''
if self.remainingItems:
self.currentSelection = \
self.remainingItems[randint(0, len(self.remainingItems) - 1)]
self.playCurrentMusicSelection()
def playRandomText(self):
if self.remainingItems:
self.currentSelection = \
self.remainingItems[randint(0, len(self.remainingItems) - 1)]
if hasattr(self, 'randomtext'):
self.randomtext.remove()
self.randomtext = self.writeText(self.currentSelection[0],
self.textBoxX, self.textBoxY, 200)
def playCurrentMusicSelection(self, x=None, y=None, z=None):
gcompris.sound.play_ogg(self.activityDataFilePath +
self.currentSelection[0])
def set_level(self, level):
'''
updates the game level when the child uses the bottom-left
navigation bar to change the level
'''
self.gcomprisBoard.level = level
gcompris.bar_set_level(self.gcomprisBoard)
self.display_level(self.gcomprisBoard.level)
# --------------------------------------------------------------------------
# METHODS TO DEAL WITH INPUT & OUTPUT OF CONTENT FILE
# --------------------------------------------------------------------------
# FYI: at first I didn't see any need to make a class to handle the data, but in
# retrospect having a data object would have been much cleaner.
def read_data(self):
'''
method to read in the data from content.desktop.in. Saves this data as
self.data for reference later.
'''
#self.data = ConfigParser.RawConfigParser() # the data that is parsed from
config = ConfigParser.RawConfigParser()
filename = gcompris.DATA_DIR + '/' + self.gcomprisBoard.name + '/content.desktop.in'
try:
gotit = config.read(filename)
if not gotit:
gcompris.utils.dialog(_("Cannot find the file '{filename}'").\
format(filename=filename),
None)
return False
except ConfigParser.Error, error:
gcompris.utils.dialog(_("Failed to parse data set '{filename}'"
" with error:\n{error}").\
format(filename=filename, error=error),
None)
return False
self.data = config
self.parseData()
def parseData(self):
'''
extract the data from the content file
'''
self.sectionNames = []
errors = []
for section in self.data.sections():
if section == 'common':
try: self.credits = self.data.get('common', 'credits')
except: self.credits = ''
try: self.background = self.data.get('common', 'background')
except: errors.append("Missing 'background' key")
try: self.backSvgId = self.data.get('common', 'backSvgId')
except: errors.append("Missing 'backSvgId' key")
try: self.author = self.data.get('common', 'author')
except: self.author = ''
try: self.generalText = _(self.data.get('common', '_GeneralText'))
except: errors.append("Missing '_GeneralText' key")
try: self.SoundMatchingGameText = _(self.data.get('common', '_SoundMatchingGameText'))
except:pass
try: self.TextMatchingGameText = _(self.data.get('common', '_TextMatchingGameText'))
except:pass
try: self.textBoxX = int(self.data.get('common', 'textBoxX'))
except:pass
try: self.textBoxY = int(self.data.get('common', 'textBoxY'))
except:pass
else:
try:
self.allSoundClips.append( (self.data.get(section, 'music'), section))
except:
pass
self.allTextPrompts.append( ( _(self.data.get(section, '_shortPrompt')), section))
self.sectionNames.append(section)
if len(errors):
gcompris.utils.dialog( "\n".join(errors), None)
def end(self):
# silence any currently playing music
gcompris.sound.play_ogg('boards/sounds/silence1s.ogg')
self.rootitem.remove()
if hasattr(self, 'location_rootitem'):
self.location_rootitem.remove()
gcompris.sound.policy_set(self.saved_policy)
gcompris.sound.resume()
def ok(self):
pass
def repeat(self):
pass
def config_stop(self):
pass
def config_start(self, profile):
pass
def key_press(self, keyval, commit_str, preedit_str):
utf8char = gtk.gdk.keyval_to_unicode(keyval)
strn = u'%c' % utf8char
def pause(self, pause):
if not pause and self.next_action:
self.next_action()
self.next_action = None
class ProgressBar:
def __init__(self, rootitem, x, y, width, height, number_of_sections):
'''
display an empty progress bar
'''
self.rootitem = rootitem
self.x = x
self.y = y
self.width = width
self.height = height
self.number_of_sections = number_of_sections
txt2 = _('Explore Status:')
item = goocanvas.Text(
parent = self.rootitem,
x = self.x,
y = self.y,
text = txt2,
font = gcompris.skin.get_font("gcompris/board/medium"),
use_markup = True
)
bounds = item.get_bounds()
# This is the start of the bar
self.x += bounds.x2 - bounds.x1 + 20
self.progressBar = goocanvas.Rect(
parent = self.rootitem,
x = self.x,
y = self.y,
width = self.width,
height = self.height,
stroke_color = "black",
fill_color_rgba = 0x666666AAL,
line_width = 2.0,
radius_x = 3,
radius_y = 3 )
def success(self):
''' Add a success item in the progress bar '''
success_width = self.width * 1.0 / self.number_of_sections
goocanvas.Rect(
parent = self.rootitem,
x = self.x,
y = self.y,
width = success_width,
height = self.height,
stroke_color = "black",
fill_color = "#32CD32",
radius_x = 3,
radius_y = 3,
line_width = 2.0)
# add a little decoration
goocanvas.Image(
parent = self.rootitem,
x = self.x + (success_width / 2.0) - 15,
y = self.y - 20,
pixbuf = gcompris.utils.load_pixmap('explore/ribbon.png')
)
self.x += success_width
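# Illustrative sketch (an assumption inferred from read_data()/parseData()
# above, not an official template) of the content.desktop.in layout this
# activity consumes:
#
#   [common]
#   background = map.svgz
#   backSvgId = background
#   _GeneralText = Click on a location to explore it
#   _TextMatchingGameText = Click on the matching location
#   # optional keys: credits, author, _SoundMatchingGameText, textBoxX, textBoxY
#
#   [1]
#   svgId = location1
#   _title = A location title
#   _shortPrompt = A short matching prompt
#   _text = A longer description
#   image = location1.png
#   # optional key: music = clip1.ogg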
|
keshashah/GCompris
|
src/explore-activity/explore.py
|
Python
|
gpl-2.0
| 20,482
|
[
"VisIt"
] |
d0234e17d48e9b92b6d1697bf96ec3f47388840405f4741f57af47a288d76f35
|
"""Helper module to query vtk cell sizes upon importing."""
try:
from vtkmodules import vtkCommonDataModel
except ImportError:
import vtk as vtkCommonDataModel
vtkcell_types = [
['VTK_EMPTY_CELL', 'vtkEmptyCell'],
['VTK_VERTEX', 'vtkVertex'],
['VTK_POLY_VERTEX', 'vtkPolyVertex'],
['VTK_LINE', 'vtkLine'],
['VTK_POLY_LINE', 'vtkPolyLine'],
['VTK_TRIANGLE', 'vtkTriangle'],
['VTK_TRIANGLE_STRIP', 'vtkTriangleStrip'],
['VTK_POLYGON', 'vtkPolygon'],
['VTK_PIXEL', 'vtkPixel'],
['VTK_QUAD', 'vtkQuad'],
['VTK_TETRA', 'vtkTetra'],
['VTK_VOXEL', 'vtkVoxel'],
['VTK_HEXAHEDRON', 'vtkHexahedron'],
['VTK_WEDGE', 'vtkWedge'],
['VTK_PYRAMID', 'vtkPyramid'],
['VTK_PENTAGONAL_PRISM', 'vtkPentagonalPrism'],
['VTK_HEXAGONAL_PRISM', 'vtkHexagonalPrism'],
['VTK_QUADRATIC_EDGE', 'vtkQuadraticEdge'],
['VTK_QUADRATIC_TRIANGLE', 'vtkQuadraticTriangle'],
['VTK_QUADRATIC_QUAD', 'vtkQuadraticQuad'],
['VTK_QUADRATIC_POLYGON', 'vtkQuadraticPolygon'],
['VTK_QUADRATIC_TETRA', 'vtkQuadraticTetra'],
['VTK_QUADRATIC_HEXAHEDRON', 'vtkQuadraticHexahedron'],
['VTK_QUADRATIC_WEDGE', 'vtkQuadraticWedge'],
['VTK_QUADRATIC_PYRAMID', 'vtkQuadraticPyramid'],
['VTK_BIQUADRATIC_QUAD', 'vtkBiQuadraticQuad'],
['VTK_TRIQUADRATIC_HEXAHEDRON', 'vtkTriQuadraticHexahedron'],
['VTK_QUADRATIC_LINEAR_QUAD', 'vtkQuadraticLinearQuad'],
['VTK_QUADRATIC_LINEAR_WEDGE', 'vtkQuadraticLinearWedge'],
['VTK_BIQUADRATIC_QUADRATIC_WEDGE', 'vtkBiQuadraticQuadraticWedge'],
['VTK_BIQUADRATIC_QUADRATIC_HEXAHEDRON', 'vtkBiQuadraticQuadraticHexahedron'],
['VTK_BIQUADRATIC_TRIANGLE', 'vtkBiQuadraticTriangle'],
['VTK_CUBIC_LINE', 'vtkCubicLine'],
['VTK_CONVEX_POINT_SET', 'vtkConvexPointSet'],
['VTK_POLYHEDRON', 'vtkPolyhedron'],
['VTK_LAGRANGE_CURVE', 'vtkLagrangeCurve'],
['VTK_LAGRANGE_TRIANGLE', 'vtkLagrangeTriangle'],
['VTK_LAGRANGE_QUADRILATERAL', 'vtkLagrangeQuadrilateral'],
['VTK_LAGRANGE_HEXAHEDRON', 'vtkLagrangeHexahedron'],
['VTK_LAGRANGE_WEDGE', 'vtkLagrangeWedge'],
['VTK_BEZIER_CURVE', 'vtkBezierCurve'],
['VTK_BEZIER_TRIANGLE', 'vtkBezierTriangle'],
['VTK_BEZIER_QUADRILATERAL', 'vtkBezierQuadrilateral'],
['VTK_BEZIER_TETRAHEDRON', 'vtkBezierTetra'],
['VTK_BEZIER_HEXAHEDRON', 'vtkBezierHexahedron'],
['VTK_BEZIER_WEDGE', 'vtkBezierWedge']
]
# get the number of points in a cell for a given cell type
# compute this at runtime as this is version dependent
enum_cell_type_nr_points_map = {}
for cell_num_str, cell_str in vtkcell_types:
if hasattr(vtkCommonDataModel, cell_str) and hasattr(vtkCommonDataModel, cell_num_str):
try:
cell_num = getattr(vtkCommonDataModel, cell_num_str)
n_points = getattr(vtkCommonDataModel, cell_str)().GetNumberOfPoints()
enum_cell_type_nr_points_map[cell_num] = n_points
except:
pass
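# Illustrative usage (a sketch, not part of the module): look up how many
# points a given cell type carries, e.g. for a linear tetrahedron:
#
#   from pyvista.utilities.cell_type_helper import enum_cell_type_nr_points_map
#   npoints = enum_cell_type_nr_points_map[vtkCommonDataModel.VTK_TETRA]  # 4
#
# Entries exist only for cell classes available in the installed VTK build.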
|
akaszynski/vtkInterface
|
pyvista/utilities/cell_type_helper.py
|
Python
|
mit
| 2,937
|
[
"VTK"
] |
ff8604ab02a6bd4233118fe27a2c5212d84a1501b668fc493a73b430c9705ab8
|
import os
from deployos import call_in_dir
from deploynpm import resolve_in_node_modules
def _gulp_in_dir(working_dir, args):
gulp_cmd = resolve_in_node_modules(working_dir, "gulp")
call_in_dir(working_dir, gulp_cmd, args)
def gulp(working_dir, *args):
_gulp_in_dir(working_dir, list(args))
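# Illustrative usage (a sketch; the directory name is an assumption):
#
#   gulp("frontend", "build", "--production")
#
# locates the locally installed gulp binary under "frontend" via
# resolve_in_node_modules and runs it there with the given arguments.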
|
kostrse/coworkingmap
|
deploy/deploygulp.py
|
Python
|
mit
| 307
|
[
"GULP"
] |
e28544283c8900e2b21fda88903e7519678b31a717754af6a6c845546b409b4f
|
# $Id$
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" unit testing code for BitEnsembles
"""
import os
import shutil
import tempfile
import unittest
from rdkit import RDConfig
from rdkit.DataStructs import SparseBitVect
# This import is important to initialize the BitEnsemble module
from rdkit.DataStructs import BitEnsembleDb
from rdkit.DataStructs.BitEnsemble import BitEnsemble
class TestCase(unittest.TestCase):
def test1(self):
ensemble = BitEnsemble()
ensemble.SetBits([1, 11, 21, 31])
self.assertEqual(ensemble.GetNumBits(), 4)
bv = SparseBitVect(100)
bv.SetBit(1)
bv.SetBit(11)
bv.SetBit(13)
score = ensemble.ScoreWithOnBits(bv)
assert score == 2, 'bad score: %d' % (score)
score = ensemble.ScoreWithIndex(bv)
assert score == 2, 'bad score: %d' % (score)
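# Worked example for the assertions above: the ensemble bits are
# {1, 11, 21, 31} and bv has {1, 11, 13} set, so exactly two ensemble
# bits are on and both scoring methods return 2.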
def test2(self):
ensemble = BitEnsemble([1, 11, 21, 31])
bv = SparseBitVect(100)
bv.SetBit(1)
bv.SetBit(11)
bv.SetBit(13)
score = ensemble.ScoreWithOnBits(bv)
assert score == 2, 'bad score: %d' % (score)
score = ensemble.ScoreWithIndex(bv)
assert score == 2, 'bad score: %d' % (score)
def test3(self):
ensemble = BitEnsemble()
for bit in [1, 11, 21, 31]:
ensemble.AddBit(bit)
bv = SparseBitVect(100)
bv.SetBit(1)
bv.SetBit(11)
bv.SetBit(13)
score = ensemble.ScoreWithOnBits(bv)
assert score == 2, 'bad score: %d' % (score)
score = ensemble.ScoreWithIndex(bv)
assert score == 2, 'bad score: %d' % (score)
def _setupDb(self):
from rdkit.Dbase.DbConnection import DbConnect
fName = RDConfig.RDTestDatabase
if RDConfig.useSqlLite:
_, tempName = tempfile.mkstemp(suffix='sqlt')
self.tempDbName = tempName
shutil.copyfile(fName, tempName)
else: # pragma: nocover
tempName = '::RDTests'
self.conn = DbConnect(tempName)
self.dbTblName = 'bit_ensemble_test'
return self.conn
def tearDown(self):
if hasattr(self, 'tempDbName') and RDConfig.useSqlLite and os.path.exists(self.tempDbName):
try:
os.unlink(self.tempDbName)
except: # pragma: nocover
import traceback
traceback.print_exc()
def testdb1(self):
""" test the sig - db functionality """
conn = self._setupDb()
ensemble = BitEnsemble()
for bit in [1, 3, 4]:
ensemble.AddBit(bit)
sigBs = [([0, 0, 0, 0, 0, 0], (0, 0, 0)),
([0, 1, 0, 1, 0, 0], (1, 1, 0)),
([0, 1, 0, 0, 1, 0], (1, 0, 1)),
([0, 1, 0, 0, 1, 1], (1, 0, 1)), ]
ensemble.InitScoreTable(conn, self.dbTblName)
for bs, tgt in sigBs:
ensemble.ScoreToDb(bs, conn)
conn.Commit()
d = conn.GetData(table=self.dbTblName)
assert len(d) == len(sigBs), 'bad number of results returned'
for i in range(len(sigBs)):
bs, tgt = tuple(sigBs[i])
dbRes = tuple(d[i])
assert dbRes == tgt, 'bad bits returned: %s != %s' % (str(dbRes), str(tgt))
d = None
self.conn = None
def testdb2(self):
""" test the sig - db functionality """
conn = self._setupDb()
ensemble = BitEnsemble()
for bit in [1, 3, 4]:
ensemble.AddBit(bit)
sigBs = [([0, 0, 0, 0, 0, 0], (0, 0, 0)),
([0, 1, 0, 1, 0, 0], (1, 1, 0)),
([0, 1, 0, 0, 1, 0], (1, 0, 1)),
([0, 1, 0, 0, 1, 1], (1, 0, 1)), ]
ensemble.InitScoreTable(conn, self.dbTblName, idInfo='id varchar(10)', actInfo='act int')
for bs, tgt in sigBs:
ensemble.ScoreToDb(bs, conn, id='foo', act=1)
conn.Commit()
d = conn.GetData(table=self.dbTblName)
assert len(d) == len(sigBs), 'bad number of results returned'
for i in range(len(sigBs)):
bs, tgt = tuple(sigBs[i])
dbRes = tuple(d[i])
assert dbRes[1:-1] == tgt, 'bad bits returned: %s != %s' % (str(dbRes[1:-1]), str(tgt))
d = None
self.conn = None
if __name__ == '__main__': # pragma: nocover
unittest.main()
|
ptosco/rdkit
|
rdkit/DataStructs/UnitTestBitEnsemble.py
|
Python
|
bsd-3-clause
| 4,181
|
[
"RDKit"
] |
fe1ff6bac2d5dd60deac162b7840069f9bcf9faaa559ef7298a7db5db136f53a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import re
from pymatgen.core.structure import Molecule
from monty.io import zopen
"""
Module implementing an XYZ file object class.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Apr 17, 2012"
class XYZ:
"""
Basic class for importing and exporting Molecules or Structures in XYZ
format.
Args:
mol: Input molecule or list of molecules
.. note::
Exporting periodic structures in the XYZ format will lose information
about the periodicity. Essentially, only cartesian coordinates are
written in this format and no information is retained about the
lattice.
"""
def __init__(self, mol, coord_precision=6):
if isinstance(mol, Molecule) or not isinstance(mol, list):
self._mols = [mol]
else:
self._mols = mol
self.precision = coord_precision
@property
def molecule(self):
"""
Returns the molecule associated with this XYZ. In the case of a
multi-frame XYZ, returns the last frame.
"""
return self._mols[-1]
@property
def all_molecules(self):
"""
Returns all the frames of molecule associated with this XYZ.
"""
return self._mols
@staticmethod
def _from_frame_string(contents):
"""
Convert a single frame XYZ string to a molecule
"""
lines = contents.split("\n")
num_sites = int(lines[0])
coords = []
sp = []
coord_patt = re.compile(
r"(\w+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)"
)
for i in range(2, 2 + num_sites):
m = coord_patt.search(lines[i])
if m:
sp.append(m.group(1)) # this is 1-indexed
# this is 0-indexed
# in case of 0.0D+00 or 0.00d+01 old double precision writing
# replace d or D by e for ten power exponent
xyz = [val.lower().replace("d", "e") for val in m.groups()[1:4]]
coords.append([float(val) for val in xyz])
return Molecule(sp, coords)
@staticmethod
def from_string(contents):
"""
Creates XYZ object from a string.
Args:
contents: String representing an XYZ file.
Returns:
XYZ object
"""
if contents[-1] != "\n":
contents += "\n"
white_space = r"[ \t\r\f\v]"
natoms_line = white_space + r"*\d+" + white_space + r"*\n"
comment_line = r"[^\n]*\n"
coord_lines = r"(\s*\w+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s+[0-9\-\+\.eEdD]+\s*\n)+"
frame_pattern_text = natoms_line + comment_line + coord_lines
pat = re.compile(frame_pattern_text, re.MULTILINE)
mols = []
for xyz_match in pat.finditer(contents):
xyz_text = xyz_match.group(0)
mols.append(XYZ._from_frame_string(xyz_text))
return XYZ(mols)
@staticmethod
def from_file(filename):
"""
Creates XYZ object from a file.
Args:
filename: XYZ filename
Returns:
XYZ object
"""
with zopen(filename) as f:
return XYZ.from_string(f.read())
def _frame_str(self, frame_mol):
output = [str(len(frame_mol)), frame_mol.composition.formula]
fmtstr = "{{}} {{:.{0}f}} {{:.{0}f}} {{:.{0}f}}".format(self.precision)
for site in frame_mol:
output.append(fmtstr.format(site.specie, site.x, site.y, site.z))
return "\n".join(output)
def __str__(self):
return "\n".join([self._frame_str(mol) for mol in self._mols])
def write_file(self, filename):
"""
Writes XYZ to file.
Args:
filename: File name of output file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
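# Illustrative round-trip sketch (file names are assumptions, not part of
# this module):
#
#   xyz = XYZ.from_file("traj.xyz")            # parses one or more frames
#   last = xyz.molecule                        # last frame as a Molecule
#   XYZ(last, coord_precision=4).write_file("last_frame.xyz")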
|
dongsenfo/pymatgen
|
pymatgen/io/xyz.py
|
Python
|
mit
| 4,144
|
[
"pymatgen"
] |
dd5eb554a2746f9d76e0217fafc3c43080348304517e639cbbd5f89c45a7eebf
|
###
### This script can be run with pvpython rather than pvbatch, as it does not
### need mpi.
###
### Purpose:
###
### Generate a static image dataset of volume rendering on the ne cooling data
###
### Example usages (assumes you are in directory with this script):
###
### 1) To run on the coarse mesh with tent-shaped opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vorticity.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse" --inputpattern "101results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse/Output/vorticity/tent" --optype "tent"
###
### 2) To run on the coarse mesh with linear opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vorticity.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse" --inputpattern "101results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse/Output/vorticity/linear" --optype "linear"
###
### 3) To run on the fine mesh with tent-shaped opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vorticity.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/fine" --inputpattern "fine_results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/fine/Output/vorticity/tent" --optype "tent"
###
### 4) To run on the fine mesh with linear opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vorticity.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/fine" --inputpattern "fine_results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/fine/Output/vorticity/linear" --optype "linear"
###
import sys, os, argparse
from paraview.simple import *
from paraview import data_exploration as wx
#import matplotlib.pyplot as plt
###############################################################################
# Helper function to generate the tent functions needed for scalar opacity
# function
###############################################################################
def createHatFunctions():
baseWidth = 0.20
spacing = baseWidth / 2.0
halfWidth = baseWidth / 2.0
numberCenters = 1.0 / baseWidth
centers = [ (baseWidth / 2.0) + (i * baseWidth) for i in range(int(numberCenters)) ]
hatFunctions = []
for c in centers:
startPoint = c - halfWidth
xPoints = [ 0.0, startPoint, startPoint + spacing, startPoint + (2 * spacing), 1.0 ]
yPoints = [ 0.0, 0.0, 1.0, 0.0, 0.0 ]
hatFunctions.append([xPoints, yPoints])
#plt.plot(xPoints, yPoints, marker='o')
#plt.show()
return hatFunctions
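# Worked example: with baseWidth = 0.20 the code above gives
# spacing = halfWidth = 0.10, numberCenters = 5 and
# centers = [0.1, 0.3, 0.5, 0.7, 0.9]; the first hat is then
#   xPoints = [0.0, 0.0, 0.1, 0.2, 1.0], yPoints = [0.0, 0.0, 1.0, 0.0, 0.0]
# i.e. a tent of full opacity centered at 0.1 of the normalized scalar range.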
###############################################################################
# This method does all the processing
###############################################################################
def doProcessing(inputDir, inputPattern, outputDir, opacityFnType):
# -----------------------------------------------------------------------------
# Path to input/output data/directories
# -----------------------------------------------------------------------------
files_pattern = os.path.join(inputDir, inputPattern)
file_times = range(0, 101)
#file_times = [ 80 ]
filenames = [ (files_pattern % time) for time in file_times]
# -----------------------------------------------------------------------------
# Rendering configuration
# -----------------------------------------------------------------------------
resolution = 500
view_size = [resolution, resolution]
angle_steps = [15, 15]
#angle_steps = [90, 90]
distance = 24632.991324377483
rotation_axis = [0.0, 1.0, 0.0]
#center_of_rotation = [-1649.1046142578125, -752.328125, 1374.1217346191406]
center_of_rotation = [0.0, 0.0, 0.0]
view = GetRenderView()
view.ViewSize = view_size
view.Background = [0.0, 0.0, 0.0]
view.OrientationAxesVisibility = 0
view.CenterAxesVisibility = 0
# -----------------------------------------------------------------------------
# Output configuration
# -----------------------------------------------------------------------------
fng = wx.FileNameGenerator(outputDir, '{time}/{volumeIdx}/{theta}_{phi}.jpg')
exporter = wx.ThreeSixtyImageStackExporter(fng,
view,
center_of_rotation,
distance,
rotation_axis,
angle_steps)
# -----------------------------------------------------------------------------
# Pipeline configuration
# -----------------------------------------------------------------------------
# create a new 'Legacy VTK Reader'
readerProxy = LegacyVTKReader(FileNames=filenames)
# This translation transform is a workaround for a bug in the camera orbiting
# calculations made in ThreeSixtyImageStackExporter
transform1 = Transform(Input=readerProxy)
transform1.Transform = 'Transform'
transform1.Transform.Translate = [1649.1046142578125, 752.328125, -1374.1217346191406]
# create a new 'Cell Data to Point Data'
cellDatatoPointData1 = CellDatatoPointData(Input=transform1)
# get color transfer function/color map for 'vorticity'
vorticityLUT = GetColorTransferFunction('vorticity')
vorticityLUT.RGBPoints = [0.0, 0.0, 0.0, 1.0, 200.0, 1.0, 0.0, 0.0]
vorticityLUT.LockScalarRange = 1
vorticityLUT.ColorSpace = 'HSV'
vorticityLUT.NanColor = [0.498039, 0.498039, 0.498039]
vorticityLUT.ScalarRangeInitialized = 1.0
# get opacity transfer function/opacity map for 'vorticity'
vorticityPWF = GetOpacityTransferFunction('vorticity')
vorticityPWF.Points = [0.0, 0.0, 0.5, 0.0, 200.0, 1.0, 0.5, 0.0]
vorticityPWF.ScalarRangeInitialized = 1
# show data from fine_results_
readerDisplay = Show(transform1)
readerDisplay.ColorArrayName = [None, '']
readerDisplay.Opacity = 0.15
readerDisplay.ScalarOpacityUnitDistance = 158.07645437184576
# show data from cellDatatoPointData1
cellDatatoPointData1Display = Show(cellDatatoPointData1)
cellDatatoPointData1Display.Representation = 'Volume'
cellDatatoPointData1Display.ColorArrayName = ['POINTS', 'vorticity']
cellDatatoPointData1Display.LookupTable = vorticityLUT
cellDatatoPointData1Display.ScalarOpacityFunction = vorticityPWF
cellDatatoPointData1Display.ScalarOpacityUnitDistance = 158.07645437184576
# -----------------------------------------------------------------------------
# Batch processing
# -----------------------------------------------------------------------------
if opacityFnType == 'tent':
hatFunctions = createHatFunctions()
Render()
for t in range(0, len(file_times), 1):
time = file_times[t]
GetAnimationScene().TimeKeeper.Time = float(time)
UpdatePipeline(time)
dataRange = [0.0, 200.0]
print "Moving to timestep ",time,", new data range: ",dataRange
for volumeIdx in range(5):
curRange = dataRange[1] - dataRange[0]
pwfPoints = []
if opacityFnType == 'tent':
xPoints = hatFunctions[volumeIdx][0]
yPoints = hatFunctions[volumeIdx][1]
for i in range(len(xPoints)):
pwfPoints.append(dataRange[0] + (xPoints[i] * curRange))
pwfPoints.append(yPoints[i])
pwfPoints.append(0.5)
pwfPoints.append(0.0)
else:
curStep = dataRange[0] + (float(volumeIdx) * (curRange / 5.0))
pwfPoints = [ dataRange[0], 0.0, 0.5, 0.0,
curStep, 0.0, 0.5, 0.0,
dataRange[1], 1.0, 0.5, 0.0 ]
newPwf = CreatePiecewiseFunction( Points=pwfPoints )
cellDatatoPointData1Display.ScalarOpacityFunction = newPwf
fng.update_active_arguments(volumeIdx=volumeIdx)
fng.update_label_arguments(volumeIdx="Idx")
exporter.UpdatePipeline(time)
###############################################################################
# Main script entry point
###############################################################################
if __name__ == "__main__":
description = "Python script to generate volume rendered NE cooling data"
parser = argparse.ArgumentParser(description=description)
parser.add_argument("--inputdir", type=str, default="", help="Path to directory where input data files exist")
parser.add_argument("--inputpattern", type=str, default="", help="String pattern containing %d where pattern should be replaced with numbers")
parser.add_argument("--outputdir", type=str, default="", help="Path to directory where cinema dataset should be written")
parser.add_argument("--optype", type=str, default="", help="Opacity function type, should be either 'tent' or 'linear'")
args = parser.parse_args()
doProcessing(args.inputdir, args.inputpattern, args.outputdir, args.optype)
|
Kitware/cinema
|
scripts/data_generation/ne-cooling/volume-vorticity.py
|
Python
|
bsd-3-clause
| 9,252
|
[
"ParaView",
"VTK"
] |
8d9a459bbd3a78f4a314273eff5b9e8f2b0b2e11b0fcc1b24a5102d9924d1088
|
# This file is part of the myhdl library, a Python package for using
# Python as a Hardware Description Language.
#
# Copyright (C) 2003-2008 Jan Decaluwe
#
# The myhdl library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
""" myhdl toVerilog package.
"""
from __future__ import absolute_import
import inspect
import ast
import myhdl
from myhdl import *
from myhdl import ConversionError
from myhdl._util import _flatten
from myhdl._compat import PY2
class _error(object):
FirstArgType = "first argument should be a classic function"
ArgType = "leaf cell type error"
NotSupported = "Not supported"
TopLevelName = "Result of toVerilog call should be assigned to a top level name"
SigMultipleDriven = "Signal has multiple drivers"
UndefinedBitWidth = "Signal has undefined bit width"
UndrivenSignal = "Signal is not driven"
UnreadSignal = "Signal is driven but not read"
UnusedPort = "Port is not used"
OutputPortRead = "Output port is read internally"
Requirement = "Requirement violation"
UnboundLocal = "Local variable may be referenced before assignment"
TypeMismatch = "Type mismatch with earlier assignment"
NrBitsMismatch = "Nr of bits mismatch with earlier assignment"
IntbvBitWidth = "intbv object should have a bit width"
#IntbvSign = "intbv's that can have negative values are not yet supported"
ModbvRange = "modbv object should have full bit vector range"
TypeInfer = "Can't infer variable type"
ReturnTypeMismatch = "Return type mismatch"
ReturnNrBitsMismatch = "Returned nr of bits mismatch"
ReturnIntbvBitWidth = "Returned intbv instance should have bit width"
ReturnTypeInfer = "Can't infer return type"
ShadowingSignal = "Port is shadowed by internal signal"
ShadowingVar = "Variable has same name as a hierarchical Signal"
FreeVarTypeError = "Free variable should be a Signal or an int"
ExtraArguments = "Extra positional or named arguments are not supported"
UnsupportedYield = "Unsupported yield statement"
UnsupportedListComp = \
"Unsupported list comprehension form: should be [intbv()[n:] for i in range(m)]"
ListElementAssign = \
"Can't assign to list element; use slice assignment to change its value"
NotASignal = "Non-local object should be a Signal"
UnsupportedType = "Object type is not supported in this context"
InconsistentType = "Signal elements should have the same base type"
InconsistentBitWidth = "Signal elements should have the same bit width"
UnsupportedFormatString = "Unsupported format string"
FormatString = "Format string error"
UnsupportedAttribute = "Unsupported attribute"
PortInList = "Port in list is not supported"
ListAsPort = "List of signals as a port is not supported"
SignalInMultipleLists = "Signal in multiple list is not supported"
class _access(object):
INPUT, OUTPUT, INOUT, UNKNOWN = range(4)
class _kind(object):
NORMAL, DECLARATION, ALWAYS, INITIAL, ALWAYS_DECO, \
ALWAYS_COMB, SIMPLE_ALWAYS_COMB, ALWAYS_SEQ, \
TASK, REG \
= range(10)
class _context(object):
BOOLEAN, YIELD, PRINT, SIGNED, UNKNOWN = range(5)
class _ConversionMixin(object):
# def getLineNo(self, node):
# lineno = node.lineno
# if lineno is None:
# for n in node.getChildNodes():
# if n.lineno is not None:
# lineno = n.lineno
# break
# lineno = lineno or 0
# return lineno
def getLineNo(self, node):
lineno = 0
if isinstance(node, (ast.stmt, ast.expr)):
lineno = node.lineno
return lineno
def getObj(self, node):
if hasattr(node, 'obj'):
return node.obj
return None
def getTarget(self, node):
if hasattr(node, 'target'):
return node.target
return None
def getKind(self, node):
if hasattr(node, 'kind'):
return node.kind
return None
def getEdge(self, node):
if hasattr(node, 'edge'):
return node.edge
return None
def getValue(self, node):
if hasattr(node, 'value'):
return node.value
return None
def getVal(self, node):
expr = ast.Expression()
expr.body = node
expr.lineno = node.lineno
expr.col_offset = node.col_offset
c = compile(expr, '<string>', 'eval')
val = eval(c, self.tree.symdict, self.tree.vardict)
# val = eval(_unparse(node), self.tree.symdict, self.tree.vardict)
return val
def raiseError(self, node, kind, msg=""):
lineno = self.getLineNo(node)
info = "in file %s, line %s:\n " % \
(self.tree.sourcefile, self.tree.lineoffset+lineno)
raise ConversionError(kind, msg, info)
def require(self, node, test, msg=""):
assert isinstance(node, ast.AST)
if not test:
self.raiseError(node, _error.Requirement, msg)
def visitChildNodes(self, node, *args):
for n in node.getChildNodes():
self.visit(n, *args)
def visitList(self, nodes):
for n in nodes:
self.visit(n)
def _LabelGenerator():
i = 1
while 1:
yield "MYHDL%s" % i
i += 1
_genLabel = _LabelGenerator()
class _Label(object):
def __init__(self, name):
self.name = next(_genLabel) + '_' + name
self.isActive = False
def __str__(self):
return str(self.name)
# this can be made more sophisticated to deal with existing suffixes
# also, may require reset facility
class _UniqueSuffixGenerator(object):
def __init__(self):
self.i = 0
def reset(self):
self.i = 0
def next(self):
self.i += 1
return "_%s" % self.i
_genUniqueSuffix = _UniqueSuffixGenerator()
# check if expression is constant
def _isConstant(tree, symdict):
v = _namesVisitor()
v.visit(tree)
for name in v.names:
if name not in symdict:
return False
if not isinstance(symdict[name], int):
return False
return True
class _namesVisitor(ast.NodeVisitor):
def __init__(self):
self.names = []
def visit_Name(self, node):
self.names.append(node.id)
def _get_argnames(node):
if PY2:
return [arg.id for arg in node.args.args]
else:
return [arg.arg for arg in node.args.args]
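# Illustrative sketch (not part of the module): _isConstant answers whether an
# expression tree references only integer-valued symbols, e.g.
#
#   _isConstant(ast.parse("WIDTH - 1", mode='eval'), {'WIDTH': 8})  # True
#   _isConstant(ast.parse("clk + 1", mode='eval'), {})              # False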
|
gw0/myhdl
|
myhdl/conversion/_misc.py
|
Python
|
lgpl-2.1
| 7,168
|
[
"VisIt"
] |
f2a19faeca88b9e0305e83e58c95c6fb84b77ce698fe915495ef9d269351f125
|
#!/usr/bin/env python
'''
This example shows how to generate the 3-center integrals and the 2-center
integral metric for the density-fitting method.
'''
import scipy
from pyscf import gto, df, lib
mol = gto.Mole()
mol.atom = '''
C 0. 0. 0.
O 0. 0. 1.3
'''
mol.basis = 'ccpvdz'
mol.build()
# Define the auxiliary fitting basis for 3-center integrals. Use the function
# make_auxmol to construct the auxiliary Mole object (auxmol) which will be
# used to generate integrals.
auxbasis = 'ccpvdz-jk-fit'
auxmol = df.addons.make_auxmol(mol, auxbasis)
# ints_3c2e is the 3-center integral tensor (ij|P), where i and j are the
# indices of the AO basis and P is the auxiliary basis
ints_3c2e = df.incore.aux_e2(mol, auxmol, intor='int3c2e')
ints_2c2e = auxmol.intor('int2c2e')
nao = mol.nao
naux = auxmol.nao
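# Density fitting approximates the 4-center ERIs through the auxiliary basis,
#   (ij|kl) ~= sum_{PQ} (ij|P) [J^-1]_{PQ} (Q|kl),  with  J_{PQ} = (P|Q),
# which is what the solve below evaluates: df_coef holds J^-1 applied to the
# 3-center integrals.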
# Compute the DF coefficients (df_coef) and the DF 2-electron integrals (df_eri)
df_coef = scipy.linalg.solve(ints_2c2e, ints_3c2e.reshape(nao*nao, naux).T)
df_coef = df_coef.reshape(naux, nao, nao)
df_eri = lib.einsum('ijP,Pkl->ijkl', ints_3c2e, df_coef)
# Now check the error of DF integrals wrt the normal ERIs
print(abs(mol.intor('int2e') - df_eri).max())
# df_coef can be computed with different metric
ints_3c1e = df.incore.aux_e2(mol, auxmol, intor='int3c1e')
ints_2c1e = auxmol.intor('int1e_ovlp')
df_coef = scipy.linalg.solve(ints_2c1e, ints_3c1e.reshape(nao*nao, naux).T)
df_coef = df_coef.reshape(naux, nao, nao)
df_eri = lib.einsum('ijP,Pkl->ijkl', ints_3c2e, df_coef)
print(abs(mol.intor('int2e') - df_eri).max())
|
gkc1000/pyscf
|
examples/df/10-access_df_integrals.py
|
Python
|
apache-2.0
| 1,545
|
[
"PySCF"
] |
6eb24efabb3da1fe0c95dc023143b072351745c2cb96673af13145e631246986
|
#!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # comand line argument handling
from low import * # custom functions, written by myself
from Bio import SeqIO # biopython stuff, to parse fasta files for instance
from rpy import r
from pylab import *
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path> -n <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f path to the predicted protein sequence fasta file" )
stdout( " -n path to the nucleotide sequence fasta file" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:n:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
orffile, ntfile = '', ''
for key, value in keys:
if key == '-f': orffile = value
if key == '-n': ntfile = value
if orffile == '':
stderr( "orf sequence data file missing." )
show_help()
elif not file_exists( orffile ):
stderr( "invalid path in orffile " + orffile )
show_help()
if ntfile == '':
stderr( "nucleotide sequence data file missing." )
show_help()
elif not file_exists( ntfile ):
stderr( "invalid path in ntfile " + ntfile )
show_help()
ntfile = get_global_path( ntfile )
orffile = get_global_path( orffile )
return orffile, ntfile
# =============================================================================
def orf_stats( orffile, ntfile ):
"""
"""
# read in all nt sequences and store them in a hash
nthash = {}
handle = open( ntfile )
for seq_record in SeqIO.parse(handle, "fasta"):
nthash[seq_record.id] = seq_record.seq.tostring()
handle.close()
#print "read in %s nucleotide sequences." % len(nthash)
stopcodons = [ 'TAG', 'TAA', 'TGA' ]
# do stats on each predicted orf (= entry in orffile)
handle = open( orffile )
aaseqlength = []
aaseqstop = []
fw = open( get_basename( orffile ) + '.withstop', 'w' )
for seq_record in SeqIO.parse(handle, "fasta") :
orfinfo = seq_record.description.split() # id frame ntfrom ntto
id = seq_record.id
aaseq = seq_record.seq.tostring()
ntseq = nthash[id]
stop = 0
pos = int(orfinfo[-2])-1
while (pos < int(orfinfo[-1])+3):
codon = ntseq[ pos : pos+3 ]
if codon in stopcodons:
stop = 1
break
pos += 3
print "%s\t%s\t%s" %( id, len(aaseq), stop )
aaseqlength.append( len(aaseq) )
aaseqstop.append( stop )
if stop: fw.write( ">" + id + "\n" + aaseq + "*\n" )
fw.flush()
fw.close()
handle.close()
rc( 'figure', figsize=(12,5) )
# all SNPs
figure()
subplot(121)
hist( aaseqlength, fc='grey' )
title( get_basename(orffile) + ' sequence length (aa)' )
subplot(122)
hist( aaseqstop, bins=[0,1], fc='grey' )
title( get_basename(orffile) + ' sequences containing a stop codon' )
savefig( get_basename(orffile) + '.pdf')
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main():
"""
"""
orffile, ntfile = handle_arguments()
orf_stats( orffile, ntfile )
# =============================================================================
main()
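# Illustrative input sketch (an assumption inferred from the header parsing in
# orf_stats above): each ORF fasta record should end its description with
# 'ntfrom' and 'ntto', e.g.
#
#   >orf00001 +2 14 352
#   MSTKLL...
#
# where 14 and 352 are 1-based coordinates into the nucleotide record with
# the same id.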
|
lotharwissler/bioinformatics
|
python/openreadingframe/stats_predicted_orfs.py
|
Python
|
mit
| 3,848
|
[
"Biopython"
] |
2eefadbca7c95f20a4515ec728724a0a49cfca0e5b4d9e35951c1fc140e4460a
|
#A* -------------------------------------------------------------------
#B* This file contains source code for the PyMOL computer program
#C* copyright 1998-2000 by Warren Lyford Delano of DeLano Scientific.
#D* -------------------------------------------------------------------
#E* It is unlawful to modify or remove this copyright notice.
#F* -------------------------------------------------------------------
#G* Please see the accompanying LICENSE file for further information.
#H* -------------------------------------------------------------------
#I* Additional authors of this source file include:
#-*
#-*
#-*
#Z* -------------------------------------------------------------------
# this is a quick and dirty first crack at a gamess interface, written by someone who
# knows very little about the program (WLD) - hence the version 1 identifier...
# by the way, most of the below is untested...
import os
import shutil
import glob
import re
import string
import sys
import time
from chempy import feedback
from chempy.brick import Brick
atNum = {
'H' : 1,
'C' : 6,
'N' : 7,
'O' : 8,
'F' : 9,
'P' : 15,
'S' : 16,
'Cl' : 17,
'Br' : 35,
'I' : 53,
}
def do(input,run_prefix=None,echo=None,
punch=None,output=None,skip=None):
if not run_prefix:
run_prefix = 'gamess_run'
if not skip:
if feedback['gamess']:
print " "+str(__name__)+': creating temporary files "%s.*"' % (run_prefix)
print " "+str(__name__)+': launching gamess...'
try:
for a in glob.glob(run_prefix+".*"):
os.unlink(a)
except:
pass
f = open(run_prefix+".inp",'w')
for a in input:
f.write(a)
f.close()
if echo:
os.system(rungms_path+' '+run_prefix+" 2>&1 | tee "+run_prefix+".out")
else:
os.system(rungms_path+' '+run_prefix+" > "+run_prefix+".out 2>&1")
# NFS workaround (flushes the directory cache so that glob will work)
try: os.unlink(".sync")
except: pass
f = open(".sync",'w')
f.close()
#
if feedback['gamess']:
print " "+str(__name__)+': job complete. '
if punch:
for src in glob.glob(run_prefix+".dat"):
f = open(src)
punch = f.readlines()
f.close()
if output:
for src in glob.glob(run_prefix+".out"):
f = open(src)
output = f.readlines()
f.close()
return (output,punch)
if os.environ.has_key('GAMESS'):
base = os.environ['GAMESS']
bin_path = base + '/bin/'
rungms_path = bin_path + 'rungms'
else:
base = ''
bin_path = ''
params_path = ''
class State:
def __init__(self):
self.model = None
self.data = None
self.vec = None
def load_model(self,model):
self.model = model
def get_zmat_ordering(self):
lst = []
for z in self.model.get_internal_tuples():
lst.append(z[0])
return lst
def get_data_group(self,basis = None,zmat = 1):
model = self.model
gmsList = []
# write header records
gmsList.append(" $DATA\n")
gmsList.append(model.molecule.title+" from "+str(__name__)+"\n")
gmsList.append("C1\n")
# write atom records in an ordering compatible with internal
# coordinate generation
c = 1
for z in self.get_zmat_ordering():
a = model.atom[z]
if not len(a.name):
name = a.symbol + "%02d"%c
else:
name = a.name
gmsList.append("%10s %5.1f %18.10f %18.10f %18.10f\n" %
(name,atNum[a.symbol],a.coord[0],
a.coord[1],a.coord[2]))
c = c + 1
gmsList.append(" $END\n")
return gmsList
def get_ordered_data_group(self):
gmsList = self.data[0:3]
flag = 1
c = 3
for a in self.data[3:]:
if flag:
flag = 0
gmsList.append(a)
if string.strip(a)=='':
flag = 1
c = c + 1
return gmsList
def get_contrl_group(self,
scftyp='RHF',
runtyp='ENERGY',
exetyp='RUN',
coord='UNIQUE',
nzvar = -1):
gmsList = []
model = self.model
if nzvar:
if nzvar<0:
nzvar = (self.model.nAtom*3)-6
gmsList.append(" $CONTRL SCFTYP=%s RUNTYP=%s EXETYP=%s\n"
% (scftyp,runtyp,exetyp) )
if coord:
gmsList.append("COORD=%s\n"%coord)
if nzvar:
gmsList.append("NZVAR=%d\n"%nzvar)
chg = 0
for a in model.atom:
chg = chg + a.formal_charge
chg = int(chg)
if chg==0:
icharg=None
else:
icharg='ICHARG=%d' % chg
if icharg:
gmsList.append("%s\n" % (icharg))
gmsList.append(" $END\n")
return gmsList
def read_output_list(self,list):
ll = len(list)
c = 0
crd_list = []
chg_list = []
nrg_list = []
for a in list:
if a[0:36] == ' COORDINATES OF ALL ATOMS ARE (ANGS)':
crd_list.append(c+3)
if a[0:13] == ' NET CHARGES:':
chg_list.append(c+4)
if a[0:37] == ' TOTAL ENERGY =':
nrg_list.append(c)
c = c + 1
atom = self.model.atom
idx = {}
c = 0
for a in atom:
idx[string.upper(a.name)]=c # games converts to uppercase
c = c + 1
if len(crd_list):
a = crd_list.pop()
cc = 0
while a<ll:
l = list[a]
name = string.strip(l[1:11])
if name=='':
break
atom[idx[name]].coord = [float(l[16:31]),
float(l[31:46]),
float(l[46:61])]
cc = cc + 1
a = a + 1
if cc and feedback['gamess']:
print " "+str(__name__)+': coordinates modified for %d atoms.' % (cc)
if len(chg_list):
a = chg_list.pop()
cc = 0
while a<ll:
l = list[a]
name = string.strip(l[1:11])
if name[0]=='-':
break
atom[idx[name]].partial_charge = float(l[19:27])
a = a + 1
cc = cc + 1
if cc and feedback['gamess']:
print " "+str(__name__)+': charges modified for %d atoms.' % (cc)
if len(nrg_list):
a = nrg_list.pop()
l = list[a]
# get energy, and convert to kcal/mole
self.model.molecule.energy = float(string.strip(l[38:58]))*627.5095
if feedback['gamess']:
print " "+str(__name__)+': energy updated %12.6f.' % self.model.molecule.energy
def read_punch_list(self,list):
ll = len(list)
c = 0
data_list = []
vec_list = []
for a in list:
if a[0:6] == ' $DATA':
data_list.append(c)
elif a[0:5] == ' $VEC':
vec_list.append(c)
c = c + 1
if len(data_list):
a = data_list.pop()
self.data = []
data = self.data
while a<ll:
la = list[a]
data.append(la)
if la[0:5] == ' $END':
break
a = a + 1
if feedback['gamess']:
print " "+str(__name__)+': read $DATA group.'
if len(vec_list):
a = vec_list.pop()
self.vec = []
vec = self.data
while a<ll:
la = list[a]
vec.append(la)
if la[0:5] == ' $END':
break
a = a + 1
if feedback['gamess']:
print " "+str(__name__)+': read new $VEC group.'
def update_data_coords(self): # update coordinates of ordered atoms in $DATA
idx = {}
c = 0
for a in self.model.atom:
idx[string.upper(a.name)]=c
c = c + 1
if self.data:
flag = 1
c = 3
for a in self.data[3:]:
if flag:
flag = 0
kee = a[0:3]
if not idx.has_key(kee):
break
i = idx[kee]
at = self.model.atom[i]
self.data[c]="%-10s%5.1f%18.10f%18.10f%18.10f\n" % (
at.name,atNum[at.symbol],at.coord[0],
at.coord[1],at.coord[2])
if string.strip(a)=='':
flag = 1
c = c + 1
def read_density_list(self,list,brick,z_step):
ll = len(list)
c = 0
den_list = []
for a in list:
if a[0:37] == ' ELECTRON DENSITY, IPOINT,X,Y,Z,EDENS':
den_list.append(c+1)
c = c + 1
if len(den_list):
lst = 0
a = den_list.pop()
for x in xrange(brick.dim[0]):
for y in xrange(brick.dim[1]):
brick.lvl[x][y][z_step] = float(list[a][36:51])
a = a + 1
if feedback['gamess']:
print " "+str(__name__)+': read density slice %d of %d.' %(
z_step+1,brick.dim[2])
def read_potential_list(self,list,brick,z_step):
ll = len(list)
c = 0
pot_list = []
for a in list:
if a[0:51] == 'THE ROWS OF THE ELECTROSTATIC POTENTIAL GRID (A.U.)':
pot_list.append(c+1)
c = c + 1
if len(pot_list):
lst = 0
a = pot_list.pop()
for x in xrange(brick.dim[0]):
aa = a
mat = list[aa][0:3]
col = []
while 1:
if list[aa][0:3]==mat:
col.append(list[aa][5:])
aa = aa + 1
else:
break
a = aa
vst = string.split(string.strip(string.join(col)))
for y in xrange(brick.dim[1]):
brick.lvl[x][y][z_step] = float(vst[y])
if feedback['gamess']:
print " "+str(__name__)+': read potential slice %d of %d.' %(
z_step+1,brick.dim[2])
def get_basis_group(self,gbasis='N31',ngauss=6,ndfunc=1):
gmsList = []
gmsList.append(" $BASIS GBASIS=%s NGAUSS=%d NDFUNC=%d\n" %
(gbasis,ngauss,ndfunc))
model = self.model
chg = 0
for a in model.atom:
chg = chg + a.formal_charge
chg = int(chg)
if chg<0:
diffsp=' DIFFSP=.TRUE.'
else:
diffsp=None
if diffsp:
gmsList.append("%s\n" % (diffsp))
gmsList.append(" $END\n")
return gmsList
def get_zmat_ext_freeze_torsion(self,flag=3):
# requires PYMOL to read dihedrals from structure
# requires list of dihedrals from tinker.amber
#
from pymol import cmd
from tinker.amber import Topology
cmd.load_model(self.model,'_gamess1')
model = self.model
# get mapping of model ordering to zmat ordering
m2z = {}
z2m = {}
c = 1 # GAMESS is one-based
for a in self.get_zmat_ordering():
m2z[a] = c
z2m[c] = a
c = c + 1
# get all torsions in the molecule
topo = Topology(self.model)
# find those where flag is set in all atoms
mask = 2 ** flag
frozen_list = []
for a in topo.torsion.keys():
if (model.atom[a[0]].flags&
model.atom[a[1]].flags&
model.atom[a[2]].flags&
model.atom[a[3]].flags)&mask:
frozen_list.append(a)
print " freeze-torsion: %d torsions will be frozen."%len(frozen_list)
irzmat = []
ifzmat = []
fvalue = []
if len(frozen_list):
for frozen in frozen_list:
# find additional torsions which need to be removed
remove = []
for a in topo.torsion.keys():
if (((a[1]==frozen[1])and(a[2]==frozen[2])) or
((a[2]==frozen[1])and(a[1]==frozen[2]))):
if a!=frozen:
remove.append(a)
# convert to internal coordinate ordering
frozen_z = (m2z[frozen[0]],m2z[frozen[1]],
m2z[frozen[2]],m2z[frozen[3]])
remove_z = []
for a in remove:
remove_z.append((m2z[a[0]],m2z[a[1]],m2z[a[2]],m2z[a[3]])) # append one tuple; list.append takes a single argument
# now reorder atoms in torsions to reflect z_matrix ordering
# (not sure this is necessary)
if frozen_z[0]>frozen_z[3]:
frozen_z = (frozen_z[3],frozen_z[2],frozen_z[1],frozen_z[0])
tmp_z = []
for a in remove_z:
if a[0]>a[3]:
tmp_z.append((a[3],a[2],a[1],a[0]))
else:
tmp_z.append(a)
remove_z = tmp_z
# get value of the fixed torsion
fixed = (z2m[frozen_z[0]],z2m[frozen_z[1]],
z2m[frozen_z[2]],z2m[frozen_z[3]])
dihe = cmd.get_dihedral("(_gamess1 and id %d)"%fixed[0],
"(_gamess1 and id %d)"%fixed[1],
"(_gamess1 and id %d)"%fixed[2],
"(_gamess1 and id %d)"%fixed[3])
# write out report for user edification
print " freeze-torsion: fixing freeze-torsion:"
print " freeze-torsion: %d-%d-%d-%d (pymol), %d-%d-%d-%d (gamess)"%(
fixed[0],fixed[1],fixed[2],fixed[3],
frozen_z[0],frozen_z[1],frozen_z[2],frozen_z[3])
print " freeze-torsion: at %5.3f"%dihe
print " freeze-torsion: removing redundant torsions:"
for a in remove_z[1:]:
print " freeze-torsion: %d-%d-%d-%d (pymol), %d-%d-%d-%d (gamess)"%(
z2m[a[0]],z2m[a[1]],z2m[a[2]],z2m[a[3]],
a[0],a[1],a[2],a[3])
# add parameters for this torsion into the list
ifzmat.append((3,frozen_z[0],frozen_z[1],frozen_z[2],frozen_z[3]))
fvalue.append(dihe)
if len(remove_z):
for a in remove_z[1:]:
irzmat.append((3,a[0],a[1],a[2],a[3]))
# generate restrained dihedral information
zmat_ext = []
if len(ifzmat):
zmat_ext.append(" IFZMAT(1)=\n")
comma = ""
for a in ifzmat:
zmat_ext.append(comma+"%d,%d,%d,%d,%d\n"%a)
comma = ","
if len(fvalue):
zmat_ext.append(" FVALUE(1)=\n")
comma = ""
for a in fvalue:
zmat_ext.append(comma+"%1.7f\n"%a)
comma = ","
if len(irzmat):
zmat_ext.append(" IRZMAT(1)=\n")
comma = ""
for a in irzmat:
zmat_ext.append(comma+"%d,%d,%d,%d,%d\n"%a)
comma = ","
cmd.delete("_gamess1") # important
if len(zmat_ext):
return zmat_ext
else:
return None
def get_zmat_group(self,auto=1,dlc=1,zmat_extend=None):
gmsList = []
if auto and dlc:
if zmat_extend == None:
gmsList.append(" $ZMAT DLC=.TRUE. AUTO=.TRUE. $END\n")
else:
gmsList.append(" $ZMAT DLC=.TRUE. AUTO=.TRUE.\n")
gmsList.extend(zmat_extend)
gmsList.append(" $END\n")
else:
raise RuntimeError
return gmsList
def get_eldens_group(self,morb=0):
gmsList = []
gmsList.append(" $ELDENS IEDEN=1 MORB=%i \n" % morb)
gmsList.append("WHERE=GRID OUTPUT=PUNCH\n $END\n")
return gmsList
def get_elpot_group(self,morb=0):
gmsList = []
gmsList.append(" $ELPOT IEPOT=1 \n")
gmsList.append("WHERE=GRID OUTPUT=PUNCH\n $END\n")
return gmsList
def get_guess_group(self,guess='HUCKEL'):
return [" $GUESS GUESS=%s $END\n"%guess]
def get_grid_group(self,brick,z_step):
origin = (
brick.origin[0],
brick.origin[1],
brick.origin[2]+brick.grid[2]*z_step)
x_coord = (
brick.origin[0]+brick.range[0]+brick.grid[0]/100.0,
brick.origin[1],
brick.origin[2]+brick.grid[2]*z_step)
y_coord = (
brick.origin[0],
brick.origin[1]+brick.range[1]+brick.grid[1]/100.0,
brick.origin[2]+brick.grid[2]*z_step)
gmsList = [
" $GRID ORIGIN(1)=%12.5f,%12.5f,%12.5f\n" % origin,
"XVEC(1) = %12.5f,%12.5f,%12.5f\n" % x_coord,
"YVEC(1) = %12.5f,%12.5f,%12.5f\n" % y_coord,
"SIZE = %12.5f\n" % brick.grid[0],
" $END\n"
]
return gmsList
def get_scf(self,dirscf=1):
gmsList = []
if dirscf:
gmsList.append(" $SCF DIRSCF=.TRUE. $END\n")
return gmsList
def get_optimize_job(self,dirscf=1,zmat_extend=None):
gmsList = []
gmsList.extend(self.get_contrl_group(runtyp='OPTIMIZE'))
gmsList.extend(self.get_basis_group())
gmsList.extend(self.get_scf(dirscf=dirscf))
gmsList.extend(self.get_data_group())
gmsList.extend(self.get_zmat_group(zmat_extend=zmat_extend))
gmsList.append(" $STATPT NSTEP=50 $END\n")
return gmsList
def get_optimize_charge_job(self):
gmsList = self.get_optimize_job()
gmsList.append(" $ELPOT IEPOT=1 WHERE=PDC $END\n")
gmsList.append(" $PDC PTSEL=GEODESIC CONSTR=CHARGE $END\n")
return gmsList
def get_energy_charge_job(self):
gmsList = self.get_energy_job()
gmsList.append(" $ELPOT IEPOT=1 WHERE=PDC $END\n")
gmsList.append(" $PDC PTSEL=GEODESIC CONSTR=CHARGE $END\n")
return gmsList
def get_prop_job(self):
gmsList = []
gmsList.extend(self.get_contrl_group(runtyp = 'PROP',
nzvar=0))
gmsList.extend(self.get_guess_group(guess='MOREAD'))
self.update_data_coords()
gmsList.extend(self.data)
gmsList.extend(self.vec)
return gmsList
def get_energy_job(self):
gmsList=[]
gmsList.extend(self.get_contrl_group(
runtyp = 'ENERGY'
))
gmsList.extend(self.get_basis_group())
gmsList.extend(self.get_scf())
gmsList.extend(self.get_data_group())
gmsList.extend(self.get_zmat_group())
return gmsList
def get_density_job(self,brick,z_step,morb=0):
gmsList = self.get_prop_job()
gmsList.extend(self.get_eldens_group(morb=morb))
gmsList.extend(self.get_grid_group(brick,z_step))
return gmsList
def get_potential_job(self,brick,z_step):
gmsList = self.get_prop_job()
gmsList.extend(self.get_elpot_group())
gmsList.extend(self.get_grid_group(brick,z_step))
return gmsList
def get_prop_charge_job(self):
gmsList = self.get_prop_job()
gmsList.append(" $ELPOT IEPOT=1 WHERE=PDC $END\n")
gmsList.append(" $PDC PTSEL=GEODESIC CONSTR=CHARGE $END\n")
return gmsList
def get_density(self,brick,morb=0,run_prefix=None):
for a in xrange(brick.dim[2]):
gmsList = self.get_density_job(brick,a,morb=morb)
result = do(gmsList,punch=1,
run_prefix=run_prefix)
self.read_density_list(result[1],brick,a)
def get_potential(self,brick,run_prefix=None):
for a in xrange(brick.dim[2]):
gmsList = self.get_potential_job(brick,a)
result = do(gmsList,punch=1,
run_prefix=run_prefix)
self.read_potential_list(result[1],brick,a)
def get_charges(self,run_prefix=None):
gmsList = self.get_energy_job()
result = do(gmsList,output=1,punch=1,
run_prefix=run_prefix)
self.read_output_list(result[0])
self.read_punch_list(result[1])
def get_energy(self,run_prefix=None):
gmsList = self.get_energy_job()
result = do(gmsList,output=1,punch=1,
run_prefix=run_prefix)
self.read_output_list(result[0])
self.read_punch_list(result[1])
def get_optimized_energy(self,run_prefix=None,zmat_extend=None):
gmsList = self.get_optimize_job(zmat_extend=zmat_extend)
result = do(gmsList,output=1,punch=1,
run_prefix=run_prefix)
self.read_output_list(result[0])
self.read_punch_list(result[1])
def get_optimized_charges(self,run_prefix=None,skip=None):
gmsList = self.get_optimize_charge_job()
result = do(gmsList,output=1,punch=1,
run_prefix=run_prefix,skip=skip)
self.read_output_list(result[0])
self.read_punch_list(result[1])
def get_prop_charges(self,run_prefix=None):
gmsList = self.get_prop_charge_job()
result = do(gmsList,output=1,punch=1,
run_prefix=run_prefix)
self.read_output_list(result[0])
self.read_punch_list(result[1])
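# --- Hedged usage sketch (editor's illustration, not part of the original
# module). The get_*_job methods above assemble a GAMESS input deck as a
# list of card strings, and the get_* drivers pass that deck to do() and
# read the results back. Assuming `model` is an instance of this class and
# `brick` is a Brick-like object with origin/grid/dim attributes:
#
#     deck = model.get_energy_job()      # [' $CONTRL ...', ' $BASIS ...', ...]
#     model.get_charges()                # runs GAMESS, parses output + punch
#     model.get_density(brick, morb=0)   # fills the brick one z-plane at a time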
if 'GAMESS' in os.environ:
base = os.environ['GAMESS']
rungms_path = base + '/bin/rungms'
else:
base = ''
rungms_path = ''
|
gratefulfrog/lib
|
python/chempy/gamess1.py
|
Python
|
gpl-2.0
| 22,588
|
[
"Amber",
"ChemPy",
"GAMESS",
"PyMOL",
"TINKER"
] |
8339f69ac3b462be305997323cccbbcd3a6eca0e35a3cecb7e2bc6ea51405aea
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Example script for an automatically created colorbar for multiple plots
This script is part of the nc2map Python module, version 0.0b.
The nc2map.CbarManager allows you to control the colorbar settings (color map,
bounds, etc.) for multiple plots at the same time. This is especially useful if
you want to use automatically calculated bounds.
This script creates a pdf onecbar-demo.pdf.
Hint: If you want to see how to apply the formatoptions: Use the
nc2map.show_fmtkeys and nc2map.show_fmtdocs functions.
It requires the NetCDF file 'demo-t2m-u-v.nc' which you can find in the
nc2map/demo directory"""
import nc2map
output = 'onecbar-demo.pdf' # final output file of this script
ncfile = 'demo-t2m-u-v.nc' # name of the netCDF file with the data
fmt = {'figtitle': 'One colorbar demo plot, initial state',
# use the variable name, month and year as plot title
'title': '%(var)s, %B %Y',
'plotcbar': False, # do not plot colorbars under the maps
}
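# Hedged aside (editor's addition, not in the original script): the module
# docstring points to two helpers for discovering what can go into
# fmt-style dictionaries; a minimal sketch of that lookup, assuming the
# documented nc2map API:
#     nc2map.show_fmtkeys()   # list the available formatoption keys
#     nc2map.show_fmtdocs()   # same, with the documentation per key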
onecbar = [
# first the colorbar options for temperature (both time steps)
{'var': 't2m',
'cmap': 'white_blue_red',
'clabel': '%(long_name)s [%(units)s]',
},
# now colorbar options for u and v (Note: we could also use
# 'var': ['u', 'v'] instead of 'level': 1)
{'level': 1,
'cmap': 'winter',
'plotcbar': 'r' # plot colorbar on the right side of the figure
},
# now colorbar options which apply to all of them
{'clabel': '%(long_name)s [%(units)s]'}
]
# now we set up exactly which time steps and so on we want to show.
# Therefore we specify the dimensions directly via their names, in order to
# use two different time steps for temperature but not for u and v.
names = {
'mapo0': { # name of the plot instance
'var': 't2m'}, # variable name
'mapo1': { # second plot
'var': 't2m',
'time': 1}, # time defaults to 0
'mapo2': { # third plot
'var': 'u',
'level': 1}, # vertical level step (defaults to 0)
'mapo3': { # fourth plot
'var': 'v',
'level': 1}}
mymaps = nc2map.Maps(ncfile,
ax=(2,2), # plot all variables into one figure
names=names,
fmt=fmt,
onecbar=onecbar)
# mymaps.show() # show the figure
# save figure at current state
pdf = mymaps.output(output, returnpdf=True)
# to update now the colorbar, we have to use the update_cbar method
mymaps.update(figtitle='One colorbar demo plot, first update: t2m cbar')
mymaps.update_cbar(var='t2m', bounds='minmax')
mymaps.output(pdf) # save the figure in the current state
mymaps.update(figtitle='One colorbar demo plot, second update: wind cbar')
mymaps.update_cbar(var='u', # select all colorbars that control maps of u
bounds='roundedsym', # draw rounded symmetric bounds
cmap='blue_white_red' # change colorbar
)
mymaps.output(pdf) # save the figure in the current state
pdf.close() # finally save the pdf
mymaps.close() # close the Maps instance
|
Chilipp/nc2map
|
demo/one_colorbar_demo.py
|
Python
|
gpl-2.0
| 3,173
|
[
"NetCDF"
] |
c56233d32478a2064d9747b70a6fb1167ed507f5c1b2ab24564a358478db81b5
|
__author__ = 'stephen'
import numpy as np
import mdtraj as md
import os, sys
def get_subindices(assignments=None, state=None, samples=10):
'''Get subsampled indices of assignments belonging to the same state.'''
assignments = np.array(assignments)
if state is not None:
indices = np.where(np.array(assignments) == state)[0]
else:
indices = range(0, len(assignments))
if samples is not None:
if len(indices) > samples:
indices = np.random.choice(indices, size=samples)
return indices
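# Hedged usage sketch (editor's illustration, not in the original file):
# pick a bounded random subsample of the frames assigned to one state.
# Defined for illustration only; never called in this module.
def _demo_get_subindices():
    labels = [0, 3, 3, 1, 3, 2, 3]
    # up to 2 random indices out of {1, 2, 4, 6}, the frames labeled 3
    return get_subindices(assignments=labels, state=3, samples=2)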
def output_trajs(trajs=None, assignments=None, n_states=None, samples=10, output_name='output', output_type='.pdb'):
for i in xrange(0, n_states):
indices = get_subindices(assignments, i, samples)
name = output_name + '_' + str(i) + output_type
trajs[indices].save(name)
def split_assignments(assignments, lengths):
'''Split a single long assignment array into segments matching
the lengths of the original trajectories.'''
if not sum(lengths) == len(assignments):
#raise Exception('The lengths are not equal!')
raise Exception('sum(lengths)=%s, len(longlist)=%s' % (sum(lengths), len(assignments)))
def find_position(x):
length, cumlength = x
return assignments[cumlength - length: cumlength]
segments = [find_position(elem) for elem in zip(lengths, np.cumsum(lengths))]
return segments
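# Hedged illustration (editor's addition, not in the original file):
# recover per-trajectory segments from one concatenated assignment array.
def _demo_split_assignments():
    assignments = [0, 0, 1, 2, 2, 2]
    lengths = [2, 4]
    # -> [[0, 0], [1, 2, 2, 2]]
    return split_assignments(assignments, lengths)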
def merge_assignment_segments(segments, length):
'''Merge small segments into a single long assignment array.'''
if not sum(length) == segments.size:
raise Exception('The lengths are not equal!')
assignments = []
for i in segments:
assignments.extend(i)
return assignments
|
stephenliu1989/HK_DataMiner
|
hkdataminer/utils/utils.py
|
Python
|
apache-2.0
| 1,695
|
[
"MDTraj"
] |
57b6c38bfdaf5ae04a34ee1a2e7cf60d3056a17afb987f951429858017bd19d2
|
#!/usr/bin/env python
# PySCUBA/src/PySCUBA/Tree_classes.py
# Author: Gregory Giecold for the GC Yuan Lab
# Affiliation: Harvard University
# Contact: g.giecold@gmail.com; ggiecold@jimmy.harvard.edu
"""Classes for flow or mass cytometry data processing, modified from the outdated 'fcm'
Python package by Jacob Frelinger.
"""
import re
import numpy as np
__all__ = ['Tree']
class Node(object):
"""Base node object.
"""
def __init__(self, name, parent, data):
self.name = name
self.parent = parent
self.data = data
self.prefix = 'n'
def view(self):
"""Return the view of the data associated with this node.
"""
return self.data
def pprint(self, depth, size):
tmp = " " * depth + self.name
if size:
tmp = tmp + " " + str(self.view().shape[0])
return tmp + "\n"
def __getattr__(self, name):
if name == 'channels':
return self.parent.channels
else:
raise AttributeError("'{0}' has no attribue '{1}'".format(str(self.__class__), name))
class Root_node(Node):
"""Root node.
"""
def __init__(self, name, data, channels):
self.name = name
self.parent = None
self.data = data
self.channels = channels
self.prefix = 'root'
class Transform_node(Node):
"""Transformed data node.
"""
def __init__(self, name, parent, data):
self.name = name
self.parent = parent
self.data = data
self.prefix = 't'
def __getattr__(self, name):
if name == 'channels':
return self.parent.channels
else:
raise AttributeError("'{0}' has no attribue '{1}'".format(str(self.__class__), name))
class Subsample_node(Node):
"""Node of subsampled data.
"""
def __init__(self, name, parent, param):
self.name = name
self.parent = parent
self.param = param
self.prefix = 's'
if isinstance(param, tuple):
self.channels = self.parent.channels[param[1]]
def view(self):
"""Return the view of the data associated with this node.
"""
return self.parent.view().__getitem__(self.param)
class Drop_channel_node(Node):
"""Node of data without some channels.
"""
def __init__(self, name, parent, param, channels):
self.name = name
self.parent = parent
self.param = param
self.prefix = 'd'
self.channels = channels
def view(self):
"""Return the view of the data associated with this node.
"""
return self.parent.view()[:, self.param]
class Gating_node(Node):
"""Node of gated data.
"""
def __init__(self, name, parent, data):
self.name = name
self.parent = parent
self.data = data
self.prefix = 'g'
def view(self):
"""Return the view of the data associated with this node.
"""
if self.parent.view().shape[0] == 0:
return np.array([]).reshape(self.parent.view().shape)
return self.parent.view()[self.data]
def __getattr__(self, name):
if name == 'channels':
return self.parent.channels
else:
raise AttributeError("'{0}' has no attribue '{1}'".format(str(self.__class__), name))
class Tree(object):
"""Tree of data for a Cyto_data object
(the latter is defined in the 'Preprocessing' module from the present package).
"""
def __init__(self, data_points, channels):
self.nodes = {}
self.root = Root_node('root', data_points, channels)
self.nodes['root'] = self.root
self.current = self.root
def parent(self):
"""Return the parent of a node.
"""
return self.current.parent
def children(self, node = None):
"""Return the children of a node."""
if node is None:
node = self.current
return [i for i in self.nodes.values() if i.parent == node]
def visit(self, name):
"""Visit a node in the tree."""
if isinstance(name, str):
self.current = self.nodes[name]
elif isinstance(name, Node):
self.current = name
else:
raise KeyError("No node named {0}.".format(name))
def get(self, name = None):
"""Return the current node object."""
if name is None:
return self.current
else:
if name in self.nodes:
return self.nodes[name]
else:
raise KeyError("No node named {0}.".format(name))
def view(self):
"""Return a view of the current data.
"""
return self.current.view()
def add_child(self, name, node):
"""Add a node to the tree at the currently selected node.
"""
if name == '':
prefix = node.prefix
pat = re.compile(prefix + r"(\d+)")
matches = [pat.search(i) for i in self.nodes]
matches = [i for i in matches if i is not None]
if len(matches):
n = max([ int(i.group(1)) for i in matches])
name = prefix + str(n + 1)
else:
name = prefix + '1'
if name in self.nodes:
raise KeyError("Name {0} already in use in tree".format(name))
else:
node.name = name
self.nodes[name] = node
node.parent = self.current
self.current = self.nodes[name]
def rename_node(self, old_name, new_name):
"""Rename a node name;
D(old,new) -> rename old to new.
"""
if old_name not in self.nodes:
raise KeyError("No node named {0}.".format(old_name))
if new_name in self.nodes:
raise KeyError("There is already a node named {0}".format(new_name))
else:
self.nodes[new_name] = self.nodes[old_name]
self.nodes[new_name].name = new_name
del self.nodes[old_name]
def pprint(self, size = False):
return self._rpprint(self.root, 0, size)
def _rpprint(self, n, d, size = False):
tmp = n.pprint(d, size)
for i in self.children(n):
tmp += self._rpprint(i, d + 1, size)
return tmp
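# --- Hedged usage sketch (editor's illustration, not part of the original
# module): build a tiny tree and exercise add_child's automatic naming.
# Defined for illustration only; never called here.
def _demo_tree():
    data = np.arange(12).reshape(4, 3)
    tree = Tree(data, channels=['FSC', 'SSC', 'FL1'])
    # an empty name makes add_child derive one from the node prefix: 's1'
    tree.add_child('', Subsample_node('', tree.get(), (slice(0, 2), slice(None))))
    return tree.pprint(size=True)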
|
GGiecold/PySCUBA
|
src/PySCUBA/Tree_classes.py
|
Python
|
mit
| 6,533
|
[
"VisIt"
] |
6cccca314517c879b91e71252028a1ef91dfb39125626a374cef7971ed538309
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example Bowtie App.
"""
from bowtie.control import Nouislider
from bowtie.visual import Plotly
import numpy as np
from numpy import random as rng
import plotlywrapper as pw
sigma = Nouislider(caption='Sigma', start=1, minimum=0.1, maximum=50)
mainplot = Plotly()
data = np.zeros(100).tolist()
def walk():
value = float(sigma.get())
data.pop(0)
data.append(value * rng.randn() + data[-1])
mainplot.do_all(pw.line(data).to_json())
if __name__ == "__main__":
from bowtie import Layout
layout = Layout(debug=False)
layout.add_controller(sigma)
layout.add_visual(mainplot)
layout.schedule(0.1, walk)
layout.build()
|
softwaremechanic/Miscellaneous
|
bowtieEx.py
|
Python
|
gpl-2.0
| 711
|
[
"Bowtie"
] |
c2d74def93df659e58786094e81e1ec3581f44cd7901ec972b06372f3795a4dd
|
"""Header value parser implementing various email-related RFC parsing rules.
The parsing methods defined in this module implement various email related
parsing rules. Principal among them is RFC 5322, which is the follow-on
to RFC 2822 and primarily a clarification of the former. It also implements
RFC 2047 encoded word decoding.
RFC 5322 goes to considerable trouble to maintain backward compatibility with
RFC 822 in the parse phase, while cleaning up the structure on the generation
phase. This parser supports correct RFC 5322 generation by tagging white space
as folding white space only when folding is allowed in the non-obsolete rule
sets. Actually, the parser is even more generous when accepting input than RFC
5322 mandates, following the spirit of Postel's Law, which RFC 5322 encourages.
Where possible, deviations from the standard are annotated on the 'defects'
attribute of tokens that deviate.
The general structure of the parser follows RFC 5322, and uses its terminology
where there is a direct correspondence. Where the implementation requires a
somewhat different structure than that used by the formal grammar, new terms
that mimic the closest existing terms are used. Thus, it really helps to have
a copy of RFC 5322 handy when studying this code.
Input to the parser is a string that has already been unfolded according to
RFC 5322 rules. According to the RFC this unfolding is the very first step, and
this parser leaves the unfolding step to a higher level message parser, which
will have already detected the line breaks that need unfolding while
determining the beginning and end of each header.
The output of the parser is a TokenList object, which is a list subclass. A
TokenList is a recursive data structure. The terminal nodes of the structure
are Terminal objects, which are subclasses of str. These do not correspond
directly to terminal objects in the formal grammar, but are instead more
practical higher level combinations of true terminals.
All TokenList and Terminal objects have a 'value' attribute, which produces the
semantically meaningful value of that part of the parse subtree. The value of
all whitespace tokens (no matter how many sub-tokens they may contain) is a
single space, as per the RFC rules. This includes 'CFWS', which is herein
included in the general class of whitespace tokens. There is one exception to
the rule that whitespace tokens are collapsed into single spaces in values: in
the value of a 'bare-quoted-string' (a quoted-string with no leading or
trailing whitespace), any whitespace that appeared between the quotation marks
is preserved in the returned value. Note that in all Terminal strings quoted
pairs are turned into their unquoted values.
All TokenList and Terminal objects also have a string value, which attempts to
be a "canonical" representation of the RFC-compliant form of the substring that
produced the parsed subtree, including minimal use of quoted pair quoting.
Whitespace runs are not collapsed.
Comment tokens also have a 'content' attribute providing the string found
between the parens (including any nested comments) with whitespace preserved.
All TokenList and Terminal objects have a 'defects' attribute which is a
possibly empty list of all the defects found while creating the token. Defects
may appear on any token in the tree, and a composite list of all defects in the
subtree is available through the 'all_defects' attribute of any node. (For
Terminal nodes x.defects == x.all_defects.)
Each object in a parse tree is called a 'token', and each has a 'token_type'
attribute that gives the name from the RFC 5322 grammar that it represents.
Not all RFC 5322 nodes are produced, and there is one non-RFC 5322 node that
may be produced: 'ptext'. A 'ptext' is a string of printable ascii characters.
It is returned in place of lists of (ctext/quoted-pair) and
(qtext/quoted-pair).
XXX: provide complete list of token types.
"""
import re
import urllib.parse    # for urllib.parse.unquote and unquote_to_bytes
from string import hexdigits
from collections import OrderedDict
from operator import itemgetter
from email import _encoded_words as _ew
from email import errors
from email import utils
#
# Useful constants and functions
#
WSP = set(' \t')
CFWS_LEADER = WSP | set('(')
SPECIALS = set(r'()<>@,:;.\"[]')
ATOM_ENDS = SPECIALS | WSP
DOT_ATOM_ENDS = ATOM_ENDS - set('.')
# '.', '"', and '(' do not end phrases in order to support obs-phrase
PHRASE_ENDS = SPECIALS - set('."(')
TSPECIALS = (SPECIALS | set('/?=')) - set('.')
TOKEN_ENDS = TSPECIALS | WSP
ASPECIALS = TSPECIALS | set("*'%")
ATTRIBUTE_ENDS = ASPECIALS | WSP
EXTENDED_ATTRIBUTE_ENDS = ATTRIBUTE_ENDS - set('%')
def quote_string(value):
return '"'+str(value).replace('\\', '\\\\').replace('"', r'\"')+'"'
#
# Accumulator for header folding
#
class _Folded:
def __init__(self, maxlen, policy):
self.maxlen = maxlen
self.policy = policy
self.lastlen = 0
self.stickyspace = None
self.firstline = True
self.done = []
self.current = []
def newline(self):
self.done.extend(self.current)
self.done.append(self.policy.linesep)
self.current.clear()
self.lastlen = 0
def finalize(self):
if self.current:
self.newline()
def __str__(self):
return ''.join(self.done)
def append(self, stoken):
self.current.append(stoken)
def append_if_fits(self, token, stoken=None):
if stoken is None:
stoken = str(token)
l = len(stoken)
if self.stickyspace is not None:
stickyspace_len = len(self.stickyspace)
if self.lastlen + stickyspace_len + l <= self.maxlen:
self.current.append(self.stickyspace)
self.lastlen += stickyspace_len
self.current.append(stoken)
self.lastlen += l
self.stickyspace = None
self.firstline = False
return True
if token.has_fws:
ws = token.pop_leading_fws()
if ws is not None:
self.stickyspace += str(ws)
stickyspace_len += len(ws)
token._fold(self)
return True
if stickyspace_len and l + 1 <= self.maxlen:
margin = self.maxlen - l
if 0 < margin < stickyspace_len:
trim = stickyspace_len - margin
self.current.append(self.stickyspace[:trim])
self.stickyspace = self.stickyspace[trim:]
stickyspace_len = trim
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.lastlen = l + stickyspace_len
self.stickyspace = None
self.firstline = False
return True
if not self.firstline:
self.newline()
self.current.append(self.stickyspace)
self.current.append(stoken)
self.stickyspace = None
self.firstline = False
return True
if self.lastlen + l <= self.maxlen:
self.current.append(stoken)
self.lastlen += l
return True
if l < self.maxlen:
self.newline()
self.current.append(stoken)
self.lastlen = l
return True
return False
#
# TokenList and its subclasses
#
class TokenList(list):
token_type = None
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.defects = []
def __str__(self):
return ''.join(str(x) for x in self)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__,
super().__repr__())
@property
def value(self):
return ''.join(x.value for x in self if x.value)
@property
def all_defects(self):
return sum((x.all_defects for x in self), self.defects)
#
# Folding API
#
# parts():
#
# return a list of objects that constitute the "higher level syntactic
# objects" specified by the RFC as the best places to fold a header line.
# The returned objects must include leading folding white space, even if
# this means mutating the underlying parse tree of the object. Each object
# is only responsible for returning *its* parts, and should not drill down
# to any lower level except as required to meet the leading folding white
# space constraint.
#
# _fold(folded):
#
# folded: the result accumulator. This is an instance of _Folded.
# (XXX: I haven't finished factoring this out yet, the folding code
# pretty much uses this as a state object.) When the folded.current
# contains as much text as will fit, the _fold method should call
# folded.newline.
# folded.lastlen: the current length of the text stored in folded.current.
# folded.maxlen: The maximum number of characters that may appear on a
# folded line. Differs from the policy setting in that "no limit" is
# represented by +inf, which means it can be used in the trivially
# logical fashion in comparisons.
#
# Currently no subclasses implement parts, and I think this will remain
# true. A subclass only needs to implement _fold when the generic version
# isn't sufficient. _fold will need to be implemented primarily when it is
# possible for encoded words to appear in the specialized token-list, since
# there is no generic algorithm that can know where exactly the encoded
# words are allowed. A _fold implementation is responsible for filling
# lines in the same general way that the top level _fold does. It may, and
# should, call the _fold method of sub-objects in a similar fashion to that
# of the top level _fold.
#
# XXX: I'm hoping it will be possible to factor the existing code further
# to reduce redundancy and make the logic clearer.
@property
def parts(self):
klass = self.__class__
this = []
for token in self:
if token.startswith_fws():
if this:
yield this[0] if len(this)==1 else klass(this)
this.clear()
end_ws = token.pop_trailing_ws()
this.append(token)
if end_ws:
yield klass(this)
this = [end_ws]
if this:
yield this[0] if len(this)==1 else klass(this)
def startswith_fws(self):
return self[0].startswith_fws()
def pop_leading_fws(self):
if self[0].token_type == 'fws':
return self.pop(0)
return self[0].pop_leading_fws()
def pop_trailing_ws(self):
if self[-1].token_type == 'cfws':
return self.pop(-1)
return self[-1].pop_trailing_ws()
@property
def has_fws(self):
for part in self:
if part.has_fws:
return True
return False
def has_leading_comment(self):
return self[0].has_leading_comment()
@property
def comments(self):
comments = []
for token in self:
comments.extend(token.comments)
return comments
def fold(self, *, policy):
# max_line_length 0/None means no limit, ie: infinitely long.
maxlen = policy.max_line_length or float("+inf")
folded = _Folded(maxlen, policy)
self._fold(folded)
folded.finalize()
return str(folded)
def as_encoded_word(self, charset):
# This works only for things returned by 'parts', which include
# the leading fws, if any, that should be used.
res = []
ws = self.pop_leading_fws()
if ws:
res.append(ws)
trailer = self.pop(-1) if self[-1].token_type=='fws' else ''
res.append(_ew.encode(str(self), charset))
res.append(trailer)
return ''.join(res)
def cte_encode(self, charset, policy):
res = []
for part in self:
res.append(part.cte_encode(charset, policy))
return ''.join(res)
def _fold(self, folded):
encoding = 'utf-8' if folded.policy.utf8 else 'ascii'
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
try:
str(part).encode(encoding)
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
# XXX: this should be a policy setting when utf8 is False.
charset = 'utf-8'
tstr = part.cte_encode(charset, folded.policy)
tlen = len(tstr)
if folded.append_if_fits(part, tstr):
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
part._fold(folded)
continue
# There are no fold points in this one; it is too long for a single
# line and can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
def pprint(self, indent=''):
print('\n'.join(self._pp(indent='')))
def ppstr(self, indent=''):
return '\n'.join(self._pp(indent=''))
def _pp(self, indent=''):
yield '{}{}/{}('.format(
indent,
self.__class__.__name__,
self.token_type)
for token in self:
if not hasattr(token, '_pp'):
yield (indent + ' !! invalid element in token '
'list: {!r}'.format(token))
else:
yield from token._pp(indent+' ')
if self.defects:
extra = ' Defects: {}'.format(self.defects)
else:
extra = ''
yield '{}){}'.format(indent, extra)
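# Hedged usage sketch (editor's illustration, not part of the original
# module): folding a token list through the public fold() entry point,
# assuming the stdlib email.policy module. Never called here.
def _demo_fold():
    from email import policy
    tl = get_unstructured('a demonstration subject line ' * 8)
    # lines no longer than policy.default.max_line_length, joined by linesep
    return tl.fold(policy=policy.default)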
class WhiteSpaceTokenList(TokenList):
@property
def value(self):
return ' '
@property
def comments(self):
return [x.content for x in self if x.token_type=='comment']
class UnstructuredTokenList(TokenList):
token_type = 'unstructured'
def _fold(self, folded):
last_ew = None
encoding = 'utf-8' if folded.policy.utf8 else 'ascii'
for part in self.parts:
tstr = str(part)
is_ew = False
try:
str(part).encode(encoding)
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None:
# We've already done an EW, combine this one with it
# if there's room.
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
oldlastlen = sum(len(x) for x in folded.current[:last_ew])
schunk = str(chunk)
lchunk = len(schunk)
if oldlastlen + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = oldlastlen + lchunk
continue
tstr = part.as_encoded_word(charset)
is_ew = True
if folded.append_if_fits(part, tstr):
if is_ew:
last_ew = len(folded.current) - 1
continue
if is_ew or last_ew:
# It's too big to fit on the line, but since we've
# got encoded words we can use encoded word folding.
part._fold_as_ew(folded)
continue
# Peel off the leading whitespace if any and make it sticky, to
# avoid infinite recursion.
ws = part.pop_leading_fws()
if ws is not None:
folded.stickyspace = str(ws)
if folded.append_if_fits(part):
continue
if part.has_fws:
part._fold(folded)
continue
# It can't be split...we just have to put it on its own line.
folded.append(tstr)
folded.newline()
last_ew = None
def cte_encode(self, charset, policy):
res = []
last_ew = None
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
if last_ew is None:
res.append(part.cte_encode(charset, policy))
last_ew = len(res)
else:
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
res.append(tl.as_encoded_word(charset))
return ''.join(res)
class Phrase(TokenList):
token_type = 'phrase'
def _fold(self, folded):
# As with Unstructured, we can have pure ASCII with or without
# surrogateescape encoded bytes, or we could have unicode. But this
# case is more complicated, since we have to deal with the various
# sub-token types and how they can be composed in the face of
# unicode-that-needs-CTE-encoding, and the fact that if a token contains
# a comment, that becomes a barrier across which we can't compose encoded
# words.
last_ew = None
encoding = 'utf-8' if folded.policy.utf8 else 'ascii'
for part in self.parts:
tstr = str(part)
tlen = len(tstr)
has_ew = False
try:
str(part).encode(encoding)
except UnicodeEncodeError:
if any(isinstance(x, errors.UndecodableBytesDefect)
for x in part.all_defects):
charset = 'unknown-8bit'
else:
charset = 'utf-8'
if last_ew is not None and not part.has_leading_comment():
# We've already done an EW, let's see if we can combine
# this one with it. The last_ew logic ensures that all we
# have at this point is atoms, no comments or quoted
# strings. So we can treat the text between the last
# encoded word and the content of this token as
# unstructured text, and things will work correctly. But
# we have to strip off any trailing comment on this token
# first, and if it is a quoted string we have to pull out
# the content (we're encoding it, so it no longer needs to
# be quoted).
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
chunk = get_unstructured(
''.join(folded.current[last_ew:]+[tstr])).as_encoded_word(charset)
schunk = str(chunk)
lchunk = len(schunk)
oldlastlen = sum(len(x) for x in folded.current[:last_ew])
if oldlastlen + lchunk <= folded.maxlen:
del folded.current[last_ew:]
folded.append(schunk)
folded.lastlen = sum(len(x) for x in folded.current)
continue
tstr = part.as_encoded_word(charset)
tlen = len(tstr)
has_ew = True
if folded.append_if_fits(part, tstr):
if has_ew and not part.comments:
last_ew = len(folded.current) - 1
elif part.comments or part.token_type == 'quoted-string':
# If a comment is involved we can't combine EWs. And if a
# quoted string is involved, it's not worth the effort to
# try to combine them.
last_ew = None
continue
part._fold(folded)
def cte_encode(self, charset, policy):
res = []
last_ew = None
is_ew = False
for part in self:
spart = str(part)
try:
spart.encode('us-ascii')
res.append(spart)
except UnicodeEncodeError:
is_ew = True
if last_ew is None:
if not part.comments:
last_ew = len(res)
res.append(part.cte_encode(charset, policy))
elif not part.has_leading_comment():
if part[-1].token_type == 'cfws' and part.comments:
remainder = part.pop(-1)
else:
remainder = ''
for i, token in enumerate(part):
if token.token_type == 'bare-quoted-string':
part[i] = UnstructuredTokenList(token[:])
tl = get_unstructured(''.join(res[last_ew:] + [spart]))
res[last_ew:] = [tl.as_encoded_word(charset)]
if part.comments or (not is_ew and part.token_type == 'quoted-string'):
last_ew = None
return ''.join(res)
class Word(TokenList):
token_type = 'word'
class CFWSList(WhiteSpaceTokenList):
token_type = 'cfws'
def has_leading_comment(self):
return bool(self.comments)
class Atom(TokenList):
token_type = 'atom'
class Token(TokenList):
token_type = 'token'
class EncodedWord(TokenList):
token_type = 'encoded-word'
cte = None
charset = None
lang = None
@property
def encoded(self):
if self.cte is not None:
return self.cte
return _ew.encode(str(self), self.charset)
class QuotedString(TokenList):
token_type = 'quoted-string'
@property
def content(self):
for x in self:
if x.token_type == 'bare-quoted-string':
return x.value
@property
def quoted_value(self):
res = []
for x in self:
if x.token_type == 'bare-quoted-string':
res.append(str(x))
else:
res.append(x.value)
return ''.join(res)
@property
def stripped_value(self):
for token in self:
if token.token_type == 'bare-quoted-string':
return token.value
class BareQuotedString(QuotedString):
token_type = 'bare-quoted-string'
def __str__(self):
return quote_string(''.join(str(x) for x in self))
@property
def value(self):
return ''.join(str(x) for x in self)
class Comment(WhiteSpaceTokenList):
token_type = 'comment'
def __str__(self):
return ''.join(sum([
["("],
[self.quote(x) for x in self],
[")"],
], []))
def quote(self, value):
if value.token_type == 'comment':
return str(value)
return str(value).replace('\\', '\\\\').replace(
'(', r'\(').replace(
')', r'\)')
@property
def content(self):
return ''.join(str(x) for x in self)
@property
def comments(self):
return [self.content]
class AddressList(TokenList):
token_type = 'address-list'
@property
def addresses(self):
return [x for x in self if x.token_type=='address']
@property
def mailboxes(self):
return sum((x.mailboxes
for x in self if x.token_type=='address'), [])
@property
def all_mailboxes(self):
return sum((x.all_mailboxes
for x in self if x.token_type=='address'), [])
class Address(TokenList):
token_type = 'address'
@property
def display_name(self):
if self[0].token_type == 'group':
return self[0].display_name
@property
def mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if self[0].token_type == 'mailbox':
return [self[0]]
elif self[0].token_type == 'invalid-mailbox':
return [self[0]]
return self[0].all_mailboxes
class MailboxList(TokenList):
token_type = 'mailbox-list'
@property
def mailboxes(self):
return [x for x in self if x.token_type=='mailbox']
@property
def all_mailboxes(self):
return [x for x in self
if x.token_type in ('mailbox', 'invalid-mailbox')]
class GroupList(TokenList):
token_type = 'group-list'
@property
def mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].mailboxes
@property
def all_mailboxes(self):
if not self or self[0].token_type != 'mailbox-list':
return []
return self[0].all_mailboxes
class Group(TokenList):
token_type = "group"
@property
def mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].mailboxes
@property
def all_mailboxes(self):
if self[2].token_type != 'group-list':
return []
return self[2].all_mailboxes
@property
def display_name(self):
return self[0].display_name
class NameAddr(TokenList):
token_type = 'name-addr'
@property
def display_name(self):
if len(self) == 1:
return None
return self[0].display_name
@property
def local_part(self):
return self[-1].local_part
@property
def domain(self):
return self[-1].domain
@property
def route(self):
return self[-1].route
@property
def addr_spec(self):
return self[-1].addr_spec
class AngleAddr(TokenList):
token_type = 'angle-addr'
@property
def local_part(self):
for x in self:
if x.token_type == 'addr-spec':
return x.local_part
@property
def domain(self):
for x in self:
if x.token_type == 'addr-spec':
return x.domain
@property
def route(self):
for x in self:
if x.token_type == 'obs-route':
return x.domains
@property
def addr_spec(self):
for x in self:
if x.token_type == 'addr-spec':
return x.addr_spec
else:
return '<>'
class ObsRoute(TokenList):
token_type = 'obs-route'
@property
def domains(self):
return [x.domain for x in self if x.token_type == 'domain']
class Mailbox(TokenList):
token_type = 'mailbox'
@property
def display_name(self):
if self[0].token_type == 'name-addr':
return self[0].display_name
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
return self[0].domain
@property
def route(self):
if self[0].token_type == 'name-addr':
return self[0].route
@property
def addr_spec(self):
return self[0].addr_spec
class InvalidMailbox(TokenList):
token_type = 'invalid-mailbox'
@property
def display_name(self):
return None
local_part = domain = route = addr_spec = display_name
class Domain(TokenList):
token_type = 'domain'
@property
def domain(self):
return ''.join(super().value.split())
class DotAtom(TokenList):
token_type = 'dot-atom'
class DotAtomText(TokenList):
token_type = 'dot-atom-text'
class AddrSpec(TokenList):
token_type = 'addr-spec'
@property
def local_part(self):
return self[0].local_part
@property
def domain(self):
if len(self) < 3:
return None
return self[-1].domain
@property
def value(self):
if len(self) < 3:
return self[0].value
return self[0].value.rstrip()+self[1].value+self[2].value.lstrip()
@property
def addr_spec(self):
nameset = set(self.local_part)
if len(nameset) > len(nameset-DOT_ATOM_ENDS):
lp = quote_string(self.local_part)
else:
lp = self.local_part
if self.domain is not None:
return lp + '@' + self.domain
return lp
class ObsLocalPart(TokenList):
token_type = 'obs-local-part'
class DisplayName(Phrase):
token_type = 'display-name'
@property
def display_name(self):
res = TokenList(self)
if res[0].token_type == 'cfws':
res.pop(0)
else:
if res[0][0].token_type == 'cfws':
res[0] = TokenList(res[0][1:])
if res[-1].token_type == 'cfws':
res.pop()
else:
if res[-1][-1].token_type == 'cfws':
res[-1] = TokenList(res[-1][:-1])
return res.value
@property
def value(self):
quote = False
if self.defects:
quote = True
else:
for x in self:
if x.token_type == 'quoted-string':
quote = True
if quote:
pre = post = ''
if self[0].token_type=='cfws' or self[0][0].token_type=='cfws':
pre = ' '
if self[-1].token_type=='cfws' or self[-1][-1].token_type=='cfws':
post = ' '
return pre+quote_string(self.display_name)+post
else:
return super().value
class LocalPart(TokenList):
token_type = 'local-part'
@property
def value(self):
if self[0].token_type == "quoted-string":
return self[0].quoted_value
else:
return self[0].value
@property
def local_part(self):
# Strip whitespace from front, back, and around dots.
res = [DOT]
last = DOT
last_is_tl = False
for tok in self[0] + [DOT]:
if tok.token_type == 'cfws':
continue
if (last_is_tl and tok.token_type == 'dot' and
last[-1].token_type == 'cfws'):
res[-1] = TokenList(last[:-1])
is_tl = isinstance(tok, TokenList)
if (is_tl and last.token_type == 'dot' and
tok[0].token_type == 'cfws'):
res.append(TokenList(tok[1:]))
else:
res.append(tok)
last = res[-1]
last_is_tl = is_tl
res = TokenList(res[1:-1])
return res.value
class DomainLiteral(TokenList):
token_type = 'domain-literal'
@property
def domain(self):
return ''.join(super().value.split())
@property
def ip(self):
for x in self:
if x.token_type == 'ptext':
return x.value
class MIMEVersion(TokenList):
token_type = 'mime-version'
major = None
minor = None
class Parameter(TokenList):
token_type = 'parameter'
sectioned = False
extended = False
charset = 'us-ascii'
@property
def section_number(self):
# Because the first token, the attribute (name) eats CFWS, the second
# token is always the section if there is one.
return self[1].number if self.sectioned else 0
@property
def param_value(self):
# This is part of the "handle quoted extended parameters" hack.
for token in self:
if token.token_type == 'value':
return token.stripped_value
if token.token_type == 'quoted-string':
for token in token:
if token.token_type == 'bare-quoted-string':
for token in token:
if token.token_type == 'value':
return token.stripped_value
return ''
class InvalidParameter(Parameter):
token_type = 'invalid-parameter'
class Attribute(TokenList):
token_type = 'attribute'
@property
def stripped_value(self):
for token in self:
if token.token_type.endswith('attrtext'):
return token.value
class Section(TokenList):
token_type = 'section'
number = None
class Value(TokenList):
token_type = 'value'
@property
def stripped_value(self):
token = self[0]
if token.token_type == 'cfws':
token = self[1]
if token.token_type.endswith(
('quoted-string', 'attribute', 'extended-attribute')):
return token.stripped_value
return self.value
class MimeParameters(TokenList):
token_type = 'mime-parameters'
@property
def params(self):
# The RFC specifically states that the ordering of parameters is not
# guaranteed and may be reordered by the transport layer. So we have
# to assume the RFC 2231 pieces can come in any order. However, we
# output them in the order that we first see a given name, which gives
# us a stable __str__.
params = OrderedDict()
for token in self:
if not token.token_type.endswith('parameter'):
continue
if token[0].token_type != 'attribute':
continue
name = token[0].value.strip()
if name not in params:
params[name] = []
params[name].append((token.section_number, token))
for name, parts in params.items():
parts = sorted(parts, key=itemgetter(0))
first_param = parts[0][1]
charset = first_param.charset
# Our arbitrary error recovery is to ignore duplicate parameters,
# to use appearance order if there are duplicate rfc 2231 parts,
# and to ignore gaps. This mimics the error recovery of get_param.
if not first_param.extended and len(parts) > 1:
if parts[1][0] == 0:
parts[1][1].defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate(s) ignored'))
parts = parts[:1]
# Else assume the *0* was missing...note that this is different
# from get_param, but we registered a defect for this earlier.
value_parts = []
i = 0
for section_number, param in parts:
if section_number != i:
# We could get fancier here and look for a complete
# duplicate extended parameter and ignore the second one
# seen. But we're not doing that. The old code didn't.
if not param.extended:
param.defects.append(errors.InvalidHeaderDefect(
'duplicate parameter name; duplicate ignored'))
continue
else:
param.defects.append(errors.InvalidHeaderDefect(
"inconsistent RFC2231 parameter numbering"))
i += 1
value = param.param_value
if param.extended:
try:
value = urllib.parse.unquote_to_bytes(value)
except UnicodeEncodeError:
# source had surrogate escaped bytes. What we do now
# is a bit of an open question. I'm not sure this is
# the best choice, but it is what the old algorithm did
value = urllib.parse.unquote(value, encoding='latin-1')
else:
try:
value = value.decode(charset, 'surrogateescape')
except LookupError:
# XXX: there should really be a custom defect for
# unknown character set to make it easy to find,
# because otherwise unknown charset is a silent
# failure.
value = value.decode('us-ascii', 'surrogateescape')
if utils._has_surrogates(value):
param.defects.append(errors.UndecodableBytesDefect())
value_parts.append(value)
value = ''.join(value_parts)
yield name, value
def __str__(self):
params = []
for name, value in self.params:
if value:
params.append('{}={}'.format(name, quote_string(value)))
else:
params.append(name)
params = '; '.join(params)
return ' ' + params if params else ''
class ParameterizedHeaderValue(TokenList):
@property
def params(self):
for token in reversed(self):
if token.token_type == 'mime-parameters':
return token.params
return {}
@property
def parts(self):
if self and self[-1].token_type == 'mime-parameters':
# We don't want to start a new line if all of the params don't fit
# after the value, so unwrap the parameter list.
return TokenList(self[:-1] + self[-1])
return TokenList(self).parts
class ContentType(ParameterizedHeaderValue):
token_type = 'content-type'
maintype = 'text'
subtype = 'plain'
class ContentDisposition(ParameterizedHeaderValue):
token_type = 'content-disposition'
content_disposition = None
class ContentTransferEncoding(TokenList):
token_type = 'content-transfer-encoding'
cte = '7bit'
class HeaderLabel(TokenList):
token_type = 'header-label'
class Header(TokenList):
token_type = 'header'
def _fold(self, folded):
folded.append(str(self.pop(0)))
folded.lastlen = len(folded.current[0])
# The first line of the header is different from all others: we don't
# want to start a new object on a new line if it has any fold points in
# it that would allow part of it to be on the first header line.
# Further, if the first fold point would fit on the new line, we want
# to do that, but if it doesn't we want to put it on the first line.
# Folded supports this via the stickyspace attribute. If this
# attribute is not None, it does the special handling.
folded.stickyspace = str(self.pop(0)) if self[0].token_type == 'cfws' else ''
rest = self.pop(0)
if self:
raise ValueError("Malformed Header token list")
rest._fold(folded)
#
# Terminal classes and instances
#
class Terminal(str):
def __new__(cls, value, token_type):
self = super().__new__(cls, value)
self.token_type = token_type
self.defects = []
return self
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super().__repr__())
@property
def all_defects(self):
return list(self.defects)
def _pp(self, indent=''):
return ["{}{}/{}({}){}".format(
indent,
self.__class__.__name__,
self.token_type,
super().__repr__(),
'' if not self.defects else ' {}'.format(self.defects),
)]
def cte_encode(self, charset, policy):
value = str(self)
try:
value.encode('us-ascii')
return value
except UnicodeEncodeError:
return _ew.encode(value, charset)
def pop_trailing_ws(self):
# This terminates the recursion.
return None
def pop_leading_fws(self):
# This terminates the recursion.
return None
@property
def comments(self):
return []
def has_leading_comment(self):
return False
def __getnewargs__(self):
return(str(self), self.token_type)
class WhiteSpaceTerminal(Terminal):
@property
def value(self):
return ' '
def startswith_fws(self):
return True
has_fws = True
class ValueTerminal(Terminal):
@property
def value(self):
return self
def startswith_fws(self):
return False
has_fws = False
def as_encoded_word(self, charset):
return _ew.encode(str(self), charset)
class EWWhiteSpaceTerminal(WhiteSpaceTerminal):
@property
def value(self):
return ''
@property
def encoded(self):
return self[:]
def __str__(self):
return ''
has_fws = True
# XXX these need to become classes and used as instances so
# that a program can't change them in a parse tree and screw
# up other parse trees. Maybe should have tests for that, too.
DOT = ValueTerminal('.', 'dot')
ListSeparator = ValueTerminal(',', 'list-separator')
RouteComponentMarker = ValueTerminal('@', 'route-component-marker')
#
# Parser
#
# Parse strings according to RFC822/2047/2822/5322 rules.
#
# This is a stateless parser. Each get_XXX function accepts a string and
# returns either a Terminal or a TokenList representing the RFC object named
# by the method and a string containing the remaining unparsed characters
# from the input. Thus a parser method consumes the next syntactic construct
# of a given type and returns a token representing the construct plus the
# unparsed remainder of the input string.
#
# For example, if the first element of a structured header is a 'phrase',
# then:
#
# phrase, value = get_phrase(value)
#
# returns the complete phrase from the start of the string value, plus any
# characters left in the string after the phrase is removed.
_wsp_splitter = re.compile(r'([{}]+)'.format(''.join(WSP))).split
_non_atom_end_matcher = re.compile(r"[^{}]+".format(
''.join(ATOM_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_printable_finder = re.compile(r"[\x00-\x20\x7F]").findall
_non_token_end_matcher = re.compile(r"[^{}]+".format(
''.join(TOKEN_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_attribute_end_matcher = re.compile(r"[^{}]+".format(
''.join(ATTRIBUTE_ENDS).replace('\\','\\\\').replace(']',r'\]'))).match
_non_extended_attribute_end_matcher = re.compile(r"[^{}]+".format(
''.join(EXTENDED_ATTRIBUTE_ENDS).replace(
'\\','\\\\').replace(']',r'\]'))).match
def _validate_xtext(xtext):
"""If input token contains ASCII non-printables, register a defect."""
non_printables = _non_printable_finder(xtext)
if non_printables:
xtext.defects.append(errors.NonPrintableDefect(non_printables))
if utils._has_surrogates(xtext):
xtext.defects.append(errors.UndecodableBytesDefect(
"Non-ASCII characters found in header token"))
def _get_ptext_to_endchars(value, endchars):
"""Scan printables/quoted-pairs until endchars and return unquoted ptext.
This function turns a run of qcontent, ccontent-without-comments, or
dtext-with-quoted-printables into a single string by unquoting any
quoted printables. It returns the string, the remaining value, and
a flag that is True iff there were any quoted printables decoded.
"""
fragment, *remainder = _wsp_splitter(value, 1)
vchars = []
escape = False
had_qp = False
for pos in range(len(fragment)):
if fragment[pos] == '\\':
if escape:
escape = False
had_qp = True
else:
escape = True
continue
if escape:
escape = False
elif fragment[pos] in endchars:
break
vchars.append(fragment[pos])
else:
pos = pos + 1
return ''.join(vchars), ''.join([fragment[pos:]] + remainder), had_qp
def get_fws(value):
"""FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
"""
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
def get_encoded_word(value):
""" encoded-word = "=?" charset "?" encoding "?" encoded-text "?="
"""
ew = EncodedWord()
if not value.startswith('=?'):
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
tok, *remainder = value[2:].split('?=', 1)
if tok == value[2:]:
raise errors.HeaderParseError(
"expected encoded word but found {}".format(value))
remstr = ''.join(remainder)
if len(remstr) > 1 and remstr[0] in hexdigits and remstr[1] in hexdigits:
# The ? after the CTE was followed by an encoded word escape (=XX).
rest, *remainder = remstr.split('?=', 1)
tok = tok + '?=' + rest
if len(tok.split()) > 1:
ew.defects.append(errors.InvalidHeaderDefect(
"whitespace inside encoded word"))
ew.cte = value
value = ''.join(remainder)
try:
text, charset, lang, defects = _ew.decode('=?' + tok + '?=')
except ValueError:
raise errors.HeaderParseError(
"encoded word format invalid: '{}'".format(ew.cte))
ew.charset = charset
ew.lang = lang
ew.defects.extend(defects)
while text:
if text[0] in WSP:
token, text = get_fws(text)
ew.append(token)
continue
chars, *remainder = _wsp_splitter(text, 1)
vtext = ValueTerminal(chars, 'vtext')
_validate_xtext(vtext)
ew.append(vtext)
text = ''.join(remainder)
return ew, value
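# Hedged illustration (editor's addition, not part of the original module):
# one RFC 2047 encoded word is consumed and decoded; the remainder of the
# value is handed back. Defined for illustration only; never called.
def _demo_get_encoded_word():
    ew, rest = get_encoded_word('=?utf-8?q?caf=C3=A9?= and more')
    # str(ew) == 'café', rest == ' and more'
    return str(ew), rest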
def get_unstructured(value):
"""unstructured = (*([FWS] vchar) *WSP) / obs-unstruct
obs-unstruct = *((*LF *CR *(obs-utext *LF *CR)) / FWS)
obs-utext = %d0 / obs-NO-WS-CTL / VCHAR
obs-NO-WS-CTL is control characters except WSP/CR/LF.
So, basically, we have printable runs, plus control characters or nulls in
the obsolete syntax, separated by whitespace. Since RFC 2047 uses the
obsolete syntax in its specification, but requires whitespace on either
side of the encoded words, I can see no reason to need to separate the
non-printable-non-whitespace from the printable runs if they occur, so we
parse this into xtext tokens separated by WSP tokens.
Because an 'unstructured' value must by definition constitute the entire
value, this 'get' routine does not return a remaining value, only the
parsed TokenList.
"""
# XXX: but what about bare CR and LF? They might signal the start or
# end of an encoded word. YAGNI for now, since our current parsers
# will never send us strings with bare CR or LF.
unstructured = UnstructuredTokenList()
while value:
if value[0] in WSP:
token, value = get_fws(value)
unstructured.append(token)
continue
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: Need to figure out how to register defects when
# appropriate here.
pass
else:
have_ws = True
if len(unstructured) > 0:
if unstructured[-1].token_type != 'fws':
unstructured.defects.append(errors.InvalidHeaderDefect(
"missing whitespace before encoded word"))
have_ws = False
if have_ws and len(unstructured) > 1:
if unstructured[-2].token_type == 'encoded-word':
unstructured[-1] = EWWhiteSpaceTerminal(
unstructured[-1], 'fws')
unstructured.append(token)
continue
tok, *remainder = _wsp_splitter(value, 1)
vtext = ValueTerminal(tok, 'vtext')
_validate_xtext(vtext)
unstructured.append(vtext)
value = ''.join(remainder)
return unstructured
def get_qp_ctext(value):
r"""ctext = <printable ascii except \ ( )>
This is not the RFC ctext, since we are handling nested comments in comment
and unquoting quoted-pairs here. We allow anything except the '()'
characters, but if we find any ASCII other than the RFC defined printable
ASCII, a NonPrintableDefect is added to the token's defects list. Since
quoted pairs are converted to their unquoted values, what is returned is
a 'ptext' token. In this case it is a WhiteSpaceTerminal, so it's value
is ' '.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '()')
ptext = WhiteSpaceTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_qcontent(value):
"""qcontent = qtext / quoted-pair
We allow anything except the DQUOTE character, but if we find any ASCII
other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Any quoted pairs are converted to their
unquoted values, so what is returned is a 'ptext' token. In this case it
is a ValueTerminal.
"""
ptext, value, _ = _get_ptext_to_endchars(value, '"')
ptext = ValueTerminal(ptext, 'ptext')
_validate_xtext(ptext)
return ptext, value
def get_atext(value):
"""atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
"""
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
def get_bare_quoted_string(value):
"""bare-quoted-string = DQUOTE *([FWS] qcontent) [FWS] DQUOTE
A quoted-string without the leading or trailing white space. Its
value is the text between the quote marks, with whitespace
preserved and quoted pairs decoded.
"""
if value[0] != '"':
raise errors.HeaderParseError(
"expected '\"' but found '{}'".format(value))
bare_quoted_string = BareQuotedString()
value = value[1:]
while value and value[0] != '"':
if value[0] in WSP:
token, value = get_fws(value)
elif value[:2] == '=?':
try:
token, value = get_encoded_word(value)
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"encoded word inside quoted string"))
except errors.HeaderParseError:
token, value = get_qcontent(value)
else:
token, value = get_qcontent(value)
bare_quoted_string.append(token)
if not value:
bare_quoted_string.defects.append(errors.InvalidHeaderDefect(
"end of header inside quoted string"))
return bare_quoted_string, value
return bare_quoted_string, value[1:]
def get_comment(value):
"""comment = "(" *([FWS] ccontent) [FWS] ")"
ccontent = ctext / quoted-pair / comment
We handle nested comments here, and quoted-pair in our qp-ctext routine.
"""
if value and value[0] != '(':
raise errors.HeaderParseError(
"expected '(' but found '{}'".format(value))
comment = Comment()
value = value[1:]
while value and value[0] != ")":
if value[0] in WSP:
token, value = get_fws(value)
elif value[0] == '(':
token, value = get_comment(value)
else:
token, value = get_qp_ctext(value)
comment.append(token)
if not value:
comment.defects.append(errors.InvalidHeaderDefect(
"end of header inside comment"))
return comment, value
return comment, value[1:]
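# Hedged illustration (editor's addition, not part of the original module):
# comments nest, and the token's content preserves the inner parens.
# Defined for illustration only; never called.
def _demo_get_comment():
    comment, rest = get_comment('(outer (inner) text) tail')
    # comment.content == 'outer (inner) text', rest == ' tail'
    return comment.content, rest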
def get_cfws(value):
"""CFWS = (1*([FWS] comment) [FWS]) / FWS
"""
cfws = CFWSList()
while value and value[0] in CFWS_LEADER:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_comment(value)
cfws.append(token)
return cfws, value
def get_quoted_string(value):
"""quoted-string = [CFWS] <bare-quoted-string> [CFWS]
'bare-quoted-string' is an intermediate class defined by this
parser and not by the RFC grammar. It is the quoted string
without any attached CFWS.
"""
quoted_string = QuotedString()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
token, value = get_bare_quoted_string(value)
quoted_string.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
quoted_string.append(token)
return quoted_string, value
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
def get_dot_atom_text(value):
""" dot-text = 1*atext *("." 1*atext)
"""
dot_atom_text = DotAtomText()
if not value or value[0] in ATOM_ENDS:
raise errors.HeaderParseError("expected atom at a start of "
"dot-atom-text but found '{}'".format(value))
while value and value[0] not in ATOM_ENDS:
token, value = get_atext(value)
dot_atom_text.append(token)
if value and value[0] == '.':
dot_atom_text.append(DOT)
value = value[1:]
if dot_atom_text[-1] is DOT:
raise errors.HeaderParseError("expected atom at end of dot-atom-text "
"but found '{}'".format('.'+value))
return dot_atom_text, value
def get_dot_atom(value):
""" dot-atom = [CFWS] dot-atom-text [CFWS]
Any place we can have a dot atom, we could instead have an rfc2047 encoded
word.
"""
dot_atom = DotAtom()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_dot_atom_text(value)
else:
token, value = get_dot_atom_text(value)
dot_atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
dot_atom.append(token)
return dot_atom, value
def get_word(value):
"""word = atom / quoted-string
Either atom or quoted-string may start with CFWS. We have to peel off this
CFWS first to determine which type of word to parse. Afterward we splice
the leading CFWS, if any, into the parsed sub-token.
If neither an atom or a quoted-string is found before the next special, a
HeaderParseError is raised.
The token returned is either an Atom or a QuotedString, as appropriate.
This means the 'word' level of the formal grammar is not represented in the
parse tree; this is because having that extra layer when manipulating the
parse tree is more confusing than it is helpful.
"""
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
else:
leader = None
if value[0]=='"':
token, value = get_quoted_string(value)
elif value[0] in SPECIALS:
raise errors.HeaderParseError("Expected 'atom' or 'quoted-string' "
"but found '{}'".format(value))
else:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
return token, value
def get_phrase(value):
""" phrase = 1*word / obs-phrase
obs-phrase = word *(word / "." / CFWS)
This means a phrase can be a sequence of words, periods, and CFWS in any
order as long as it starts with at least one word. If anything other than
words is detected, an ObsoleteHeaderDefect is added to the token's defect
list. We also accept a phrase that starts with CFWS followed by a dot;
this is registered as an InvalidHeaderDefect, since it is not supported by
even the obsolete grammar.
"""
phrase = Phrase()
try:
token, value = get_word(value)
phrase.append(token)
except errors.HeaderParseError:
phrase.defects.append(errors.InvalidHeaderDefect(
"phrase does not start with word"))
while value and value[0] not in PHRASE_ENDS:
if value[0]=='.':
phrase.append(DOT)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"period in 'phrase'"))
value = value[1:]
else:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
phrase.defects.append(errors.ObsoleteHeaderDefect(
"comment found without atom"))
else:
raise
phrase.append(token)
return phrase, value
def get_local_part(value):
""" local-part = dot-atom / quoted-string / obs-local-part
"""
local_part = LocalPart()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected local-part but found '{}'".format(value))
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
try:
token, value = get_word(value)
except errors.HeaderParseError:
if value[0] != '\\' and value[0] in PHRASE_ENDS:
raise
token = TokenList()
if leader is not None:
token[:0] = [leader]
local_part.append(token)
if value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
obs_local_part, value = get_obs_local_part(str(local_part) + value)
if obs_local_part.token_type == 'invalid-obs-local-part':
local_part.defects.append(errors.InvalidHeaderDefect(
"local-part is not dot-atom, quoted-string, or obs-local-part"))
else:
local_part.defects.append(errors.ObsoleteHeaderDefect(
"local-part is not a dot-atom (contains CFWS)"))
local_part[0] = obs_local_part
try:
local_part.value.encode('ascii')
except UnicodeEncodeError:
local_part.defects.append(errors.NonASCIILocalPartDefect(
"local-part contains non-ASCII characters)"))
return local_part, value
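# A minimal illustration of get_local_part (assumed behavior, untested here):
#
#   >>> local_part, rest = get_local_part('john.q.public@example.com')
#   >>> str(local_part), rest
#   ('john.q.public', '@example.com')
#
# The '@' is an ATOM_ENDS character, so the dot-atom parse stops cleanly at
# the domain separator and no obs-local-part fallback is needed.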
def get_obs_local_part(value):
""" obs-local-part = word *("." word)
"""
obs_local_part = ObsLocalPart()
last_non_ws_was_dot = False
while value and (value[0]=='\\' or value[0] not in PHRASE_ENDS):
if value[0] == '.':
if last_non_ws_was_dot:
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"invalid repeated '.'"))
obs_local_part.append(DOT)
last_non_ws_was_dot = True
value = value[1:]
continue
elif value[0]=='\\':
obs_local_part.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"'\\' character outside of quoted-string/ccontent"))
last_non_ws_was_dot = False
continue
if obs_local_part and obs_local_part[-1].token_type != 'dot':
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"missing '.' between words"))
try:
token, value = get_word(value)
last_non_ws_was_dot = False
except errors.HeaderParseError:
if value[0] not in CFWS_LEADER:
raise
token, value = get_cfws(value)
obs_local_part.append(token)
if (obs_local_part[0].token_type == 'dot' or
obs_local_part[0].token_type=='cfws' and
obs_local_part[1].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid leading '.' in local part"))
if (obs_local_part[-1].token_type == 'dot' or
obs_local_part[-1].token_type=='cfws' and
obs_local_part[-2].token_type=='dot'):
obs_local_part.defects.append(errors.InvalidHeaderDefect(
"Invalid trailing '.' in local part"))
if obs_local_part.defects:
obs_local_part.token_type = 'invalid-obs-local-part'
return obs_local_part, value
def get_dtext(value):
r""" dtext = <printable ascii except \ [ ]> / obs-dtext
obs-dtext = obs-NO-WS-CTL / quoted-pair
We allow anything except the excluded characters, but if we find any
ASCII other than the RFC defined printable ASCII, a NonPrintableDefect is
added to the token's defects list. Quoted pairs are converted to their
unquoted values, so what is returned is a ptext token, in this case a
ValueTerminal. If there were quoted-printables, an ObsoleteHeaderDefect is
added to the returned token's defect list.
"""
ptext, value, had_qp = _get_ptext_to_endchars(value, '[]')
ptext = ValueTerminal(ptext, 'ptext')
if had_qp:
ptext.defects.append(errors.ObsoleteHeaderDefect(
"quoted printable found in domain-literal"))
_validate_xtext(ptext)
return ptext, value
def _check_for_early_dl_end(value, domain_literal):
if value:
return False
domain_literal.append(errors.InvalidHeaderDefect(
"end of input inside domain-literal"))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
return True
def get_domain_literal(value):
""" domain-literal = [CFWS] "[" *([FWS] dtext) [FWS] "]" [CFWS]
"""
domain_literal = DomainLiteral()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
if not value:
raise errors.HeaderParseError("expected domain-literal")
if value[0] != '[':
raise errors.HeaderParseError("expected '[' at start of domain-literal "
"but found '{}'".format(value))
value = value[1:]
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
domain_literal.append(ValueTerminal('[', 'domain-literal-start'))
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
token, value = get_dtext(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] in WSP:
token, value = get_fws(value)
domain_literal.append(token)
if _check_for_early_dl_end(value, domain_literal):
return domain_literal, value
if value[0] != ']':
raise errors.HeaderParseError("expected ']' at end of domain-literal "
"but found '{}'".format(value))
domain_literal.append(ValueTerminal(']', 'domain-literal-end'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
domain_literal.append(token)
return domain_literal, value
def get_domain(value):
""" domain = dot-atom / domain-literal / obs-domain
        obs-domain = atom *("." atom)
"""
domain = Domain()
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected domain but found '{}'".format(value))
if value[0] == '[':
token, value = get_domain_literal(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
return domain, value
try:
token, value = get_dot_atom(value)
except errors.HeaderParseError:
token, value = get_atom(value)
if leader is not None:
token[:0] = [leader]
domain.append(token)
if value and value[0] == '.':
domain.defects.append(errors.ObsoleteHeaderDefect(
"domain is not a dot-atom (contains CFWS)"))
if domain[0].token_type == 'dot-atom':
domain[:] = domain[0]
while value and value[0] == '.':
domain.append(DOT)
token, value = get_atom(value[1:])
domain.append(token)
return domain, value
def get_addr_spec(value):
""" addr-spec = local-part "@" domain
"""
addr_spec = AddrSpec()
token, value = get_local_part(value)
addr_spec.append(token)
if not value or value[0] != '@':
addr_spec.defects.append(errors.InvalidHeaderDefect(
"add-spec local part with no domain"))
return addr_spec, value
addr_spec.append(ValueTerminal('@', 'address-at-symbol'))
token, value = get_domain(value[1:])
addr_spec.append(token)
return addr_spec, value
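# A minimal illustration of get_addr_spec (assumed behavior, untested here;
# local_part and domain are properties of the AddrSpec token defined
# elsewhere in this module):
#
#   >>> addr_spec, rest = get_addr_spec('dinsdale@python.org')
#   >>> addr_spec.local_part, addr_spec.domain, rest
#   ('dinsdale', 'python.org', '')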
def get_obs_route(value):
""" obs-route = obs-domain-list ":"
obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain])
Returns an obs-route token with the appropriate sub-tokens (that is,
there is no obs-domain-list in the parse tree).
"""
obs_route = ObsRoute()
while value and (value[0]==',' or value[0] in CFWS_LEADER):
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
elif value[0] == ',':
obs_route.append(ListSeparator)
value = value[1:]
if not value or value[0] != '@':
raise errors.HeaderParseError(
"expected obs-route domain but found '{}'".format(value))
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
while value and value[0]==',':
obs_route.append(ListSeparator)
value = value[1:]
if not value:
break
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
obs_route.append(token)
if value[0] == '@':
obs_route.append(RouteComponentMarker)
token, value = get_domain(value[1:])
obs_route.append(token)
if not value:
raise errors.HeaderParseError("end of header while parsing obs-route")
if value[0] != ':':
raise errors.HeaderParseError( "expected ':' marking end of "
"obs-route but found '{}'".format(value))
obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
return obs_route, value[1:]
def get_angle_addr(value):
""" angle-addr = [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
obs-angle-addr = [CFWS] "<" obs-route addr-spec ">" [CFWS]
"""
angle_addr = AngleAddr()
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
if not value or value[0] != '<':
raise errors.HeaderParseError(
"expected angle-addr but found '{}'".format(value))
angle_addr.append(ValueTerminal('<', 'angle-addr-start'))
value = value[1:]
# Although it is not legal per RFC5322, SMTP uses '<>' in certain
# circumstances.
if value[0] == '>':
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
angle_addr.defects.append(errors.InvalidHeaderDefect(
"null addr-spec in angle-addr"))
value = value[1:]
return angle_addr, value
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
try:
token, value = get_obs_route(value)
angle_addr.defects.append(errors.ObsoleteHeaderDefect(
"obsolete route specification in angle-addr"))
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected addr-spec or obs-route but found '{}'".format(value))
angle_addr.append(token)
token, value = get_addr_spec(value)
angle_addr.append(token)
if value and value[0] == '>':
value = value[1:]
else:
angle_addr.defects.append(errors.InvalidHeaderDefect(
"missing trailing '>' on angle-addr"))
angle_addr.append(ValueTerminal('>', 'angle-addr-end'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
angle_addr.append(token)
return angle_addr, value
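# A minimal illustration of get_angle_addr (assumed behavior, untested here):
#
#   >>> token, rest = get_angle_addr('<dinsdale@python.org> (comment)')
#   >>> str(token)
#   '<dinsdale@python.org> (comment)'
#
# The trailing comment is CFWS and is folded into the token; a bare '<>'
# instead yields a token carrying an InvalidHeaderDefect, per the SMTP
# allowance noted above.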
def get_display_name(value):
""" display-name = phrase
Because this is simply a name-rule, we don't return a display-name
token containing a phrase, but rather a display-name token with
the content of the phrase.
"""
display_name = DisplayName()
token, value = get_phrase(value)
display_name.extend(token[:])
display_name.defects = token.defects[:]
return display_name, value
def get_name_addr(value):
""" name-addr = [display-name] angle-addr
"""
name_addr = NameAddr()
# Both the optional display name and the angle-addr can start with cfws.
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(leader))
if value[0] != '<':
if value[0] in PHRASE_ENDS:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(value))
token, value = get_display_name(value)
if not value:
raise errors.HeaderParseError(
"expected name-addr but found '{}'".format(token))
if leader is not None:
token[0][:0] = [leader]
leader = None
name_addr.append(token)
token, value = get_angle_addr(value)
if leader is not None:
token[:0] = [leader]
name_addr.append(token)
return name_addr, value
def get_mailbox(value):
""" mailbox = name-addr / addr-spec
"""
# The only way to figure out if we are dealing with a name-addr or an
# addr-spec is to try parsing each one.
mailbox = Mailbox()
try:
token, value = get_name_addr(value)
except errors.HeaderParseError:
try:
token, value = get_addr_spec(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected mailbox but found '{}'".format(value))
if any(isinstance(x, errors.InvalidHeaderDefect)
for x in token.all_defects):
mailbox.token_type = 'invalid-mailbox'
mailbox.append(token)
return mailbox, value
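# Sketch of the two branches of get_mailbox (assumed behavior, untested
# here): 'Fred <f@example.com>' parses via the name-addr branch, while a bare
# 'f@example.com' falls through to the addr-spec branch; both come back
# wrapped in a Mailbox token.
#
#   >>> mbox, rest = get_mailbox('Fred <f@example.com>')
#   >>> mbox.token_type
#   'mailbox'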
def get_invalid_mailbox(value, endchars):
""" Read everything up to one of the chars in endchars.
This is outside the formal grammar. The InvalidMailbox TokenList that is
returned acts like a Mailbox, but the data attributes are None.
"""
invalid_mailbox = InvalidMailbox()
while value and value[0] not in endchars:
if value[0] in PHRASE_ENDS:
invalid_mailbox.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_mailbox.append(token)
return invalid_mailbox, value
def get_mailbox_list(value):
""" mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list
obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS])
For this routine we go outside the formal grammar in order to improve error
handling. We recognize the end of the mailbox list only at the end of the
value or at a ';' (the group terminator). This is so that we can turn
invalid mailboxes into InvalidMailbox tokens and continue parsing any
remaining valid mailboxes. We also allow all mailbox entries to be null,
and this condition is handled appropriately at a higher level.
"""
mailbox_list = MailboxList()
while value and value[0] != ';':
try:
token, value = get_mailbox(value)
mailbox_list.append(token)
except errors.HeaderParseError:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] in ',;':
mailbox_list.append(leader)
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
elif value[0] == ',':
mailbox_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in mailbox-list"))
else:
token, value = get_invalid_mailbox(value, ',;')
if leader is not None:
token[:0] = [leader]
mailbox_list.append(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] not in ',;':
# Crap after mailbox; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = mailbox_list[-1]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',;')
mailbox.extend(token)
mailbox_list.defects.append(errors.InvalidHeaderDefect(
"invalid mailbox in mailbox-list"))
if value and value[0] == ',':
mailbox_list.append(ListSeparator)
value = value[1:]
return mailbox_list, value
def get_group_list(value):
""" group-list = mailbox-list / CFWS / obs-group-list
obs-group-list = 1*([CFWS] ",") [CFWS]
"""
group_list = GroupList()
if not value:
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header before group-list"))
return group_list, value
leader = None
if value and value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
# This should never happen in email parsing, since CFWS-only is a
# legal alternative to group-list in a group, which is the only
# place group-list appears.
group_list.defects.append(errors.InvalidHeaderDefect(
"end of header in group-list"))
group_list.append(leader)
return group_list, value
if value[0] == ';':
group_list.append(leader)
return group_list, value
token, value = get_mailbox_list(value)
if len(token.all_mailboxes)==0:
if leader is not None:
group_list.append(leader)
group_list.extend(token)
group_list.defects.append(errors.ObsoleteHeaderDefect(
"group-list with empty entries"))
return group_list, value
if leader is not None:
token[:0] = [leader]
group_list.append(token)
return group_list, value
def get_group(value):
""" group = display-name ":" [group-list] ";" [CFWS]
"""
group = Group()
token, value = get_display_name(value)
if not value or value[0] != ':':
raise errors.HeaderParseError("expected ':' at end of group "
"display name but found '{}'".format(value))
group.append(token)
group.append(ValueTerminal(':', 'group-display-name-terminator'))
value = value[1:]
if value and value[0] == ';':
group.append(ValueTerminal(';', 'group-terminator'))
return group, value[1:]
token, value = get_group_list(value)
group.append(token)
if not value:
group.defects.append(errors.InvalidHeaderDefect(
"end of header in group"))
if value[0] != ';':
raise errors.HeaderParseError(
"expected ';' at end of group but found {}".format(value))
group.append(ValueTerminal(';', 'group-terminator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
group.append(token)
return group, value
def get_address(value):
""" address = mailbox / group
Note that counter-intuitively, an address can be either a single address or
a list of addresses (a group). This is why the returned Address object has
a 'mailboxes' attribute which treats a single address as a list of length
    one. When you need to differentiate between the two cases, extract the single
element, which is either a mailbox or a group token.
"""
# The formal grammar isn't very helpful when parsing an address. mailbox
# and group, especially when allowing for obsolete forms, start off very
# similarly. It is only when you reach one of @, <, or : that you know
# what you've got. So, we try each one in turn, starting with the more
# likely of the two. We could perhaps make this more efficient by looking
# for a phrase and then branching based on the next character, but that
# would be a premature optimization.
address = Address()
try:
token, value = get_group(value)
except errors.HeaderParseError:
try:
token, value = get_mailbox(value)
except errors.HeaderParseError:
raise errors.HeaderParseError(
"expected address but found '{}'".format(value))
address.append(token)
return address, value
def get_address_list(value):
""" address_list = (address *("," address)) / obs-addr-list
obs-addr-list = *([CFWS] ",") address *("," [address / CFWS])
We depart from the formal grammar here by continuing to parse until the end
of the input, assuming the input to be entirely composed of an
address-list. This is always true in email parsing, and allows us
to skip invalid addresses to parse additional valid ones.
"""
address_list = AddressList()
while value:
try:
token, value = get_address(value)
address_list.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value or value[0] == ',':
address_list.append(leader)
address_list.defects.append(errors.ObsoleteHeaderDefect(
"address-list entry with no content"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
elif value[0] == ',':
address_list.defects.append(errors.ObsoleteHeaderDefect(
"empty element in address-list"))
else:
token, value = get_invalid_mailbox(value, ',')
if leader is not None:
token[:0] = [leader]
address_list.append(Address([token]))
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value and value[0] != ',':
# Crap after address; treat it as an invalid mailbox.
# The mailbox info will still be available.
mailbox = address_list[-1][0]
mailbox.token_type = 'invalid-mailbox'
token, value = get_invalid_mailbox(value, ',')
mailbox.extend(token)
address_list.defects.append(errors.InvalidHeaderDefect(
"invalid address in address-list"))
if value: # Must be a , at this point.
address_list.append(ValueTerminal(',', 'list-separator'))
value = value[1:]
return address_list, value
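# A minimal illustration of get_address_list (assumed behavior, untested
# here):
#
#   >>> addrs, rest = get_address_list('dinsdale@python.org, B <b@example.com>')
#   >>> len(addrs.mailboxes), rest
#   (2, '')
#
# Invalid entries do not abort the parse; they become invalid-mailbox tokens
# with a recorded defect, which is the departure from the formal grammar
# described in the docstring above.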
#
# XXX: As I begin to add additional header parsers, I'm realizing we probably
# have two level of parser routines: the get_XXX methods that get a token in
# the grammar, and parse_XXX methods that parse an entire field value. So
# get_address_list above should really be a parse_ method, as probably should
# be get_unstructured.
#
def parse_mime_version(value):
""" mime-version = [CFWS] 1*digit [CFWS] "." [CFWS] 1*digit [CFWS]
"""
# The [CFWS] is implicit in the RFC 2045 BNF.
# XXX: This routine is a bit verbose, should factor out a get_int method.
mime_version = MIMEVersion()
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Missing MIME version number (eg: 1.0)"))
return mime_version
if value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
mime_version.defects.append(errors.HeaderMissingRequiredValue(
"Expected MIME version number but found only CFWS"))
digits = ''
while value and value[0] != '.' and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME major version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.major = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value or value[0] != '.':
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
if value:
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
mime_version.append(ValueTerminal('.', 'version-separator'))
value = value[1:]
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if not value:
if mime_version.major is not None:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Incomplete MIME version; found only major number"))
return mime_version
digits = ''
while value and value[0] not in CFWS_LEADER:
digits += value[0]
value = value[1:]
if not digits.isdigit():
mime_version.defects.append(errors.InvalidHeaderDefect(
"Expected MIME minor version number but found {!r}".format(digits)))
mime_version.append(ValueTerminal(digits, 'xtext'))
else:
mime_version.minor = int(digits)
mime_version.append(ValueTerminal(digits, 'digits'))
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mime_version.append(token)
if value:
mime_version.defects.append(errors.InvalidHeaderDefect(
"Excess non-CFWS text after MIME version"))
mime_version.append(ValueTerminal(value, 'xtext'))
return mime_version
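# A minimal illustration of parse_mime_version (assumed behavior, untested
# here):
#
#   >>> mv = parse_mime_version('1.0')
#   >>> mv.major, mv.minor
#   (1, 0)
#
# A truncated value such as '1.' stops after the major number and records an
# InvalidHeaderDefect rather than raising.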
def get_invalid_parameter(value):
""" Read everything up to the next ';'.
This is outside the formal grammar. The InvalidParameter TokenList that is
returned acts like a Parameter, but the data attributes are None.
"""
invalid_parameter = InvalidParameter()
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
invalid_parameter.append(ValueTerminal(value[0],
'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
invalid_parameter.append(token)
return invalid_parameter, value
def get_ttext(value):
"""ttext = <matches _ttext_matcher>
We allow any non-TOKEN_ENDS in ttext, but add defects to the token's
defects list if we find non-ttext characters. We also register defects for
*any* non-printables even though the RFC doesn't exclude all of them,
because we follow the spirit of RFC 5322.
"""
m = _non_token_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected ttext but found '{}'".format(value))
ttext = m.group()
value = value[len(ttext):]
ttext = ValueTerminal(ttext, 'ttext')
_validate_xtext(ttext)
return ttext, value
def get_token(value):
"""token = [CFWS] 1*ttext [CFWS]
The RFC equivalent of ttext is any US-ASCII chars except space, ctls, or
tspecials. We also exclude tabs even though the RFC doesn't.
The RFC implies the CFWS but is not explicit about it in the BNF.
"""
mtoken = Token()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
if value and value[0] in TOKEN_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_ttext(value)
mtoken.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
mtoken.append(token)
return mtoken, value
def get_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character)
We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
token's defects list if we find non-attrtext characters. We also register
defects for *any* non-printables even though the RFC doesn't exclude all of
them, because we follow the spirit of RFC 5322.
"""
m = _non_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_attribute(value):
""" [CFWS] 1*attrtext [CFWS]
This version of the BNF makes the CFWS explicit, and as usual we use a
value terminal for the actual run of characters. The RFC equivalent of
attrtext is the token characters, with the subtraction of '*', "'", and '%'.
We include tab in the excluded set just as we do for token.
"""
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_extended_attrtext(value):
"""attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later).
"""
m = _non_extended_attribute_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected extended attrtext but found {!r}".format(value))
attrtext = m.group()
value = value[len(attrtext):]
attrtext = ValueTerminal(attrtext, 'extended-attrtext')
_validate_xtext(attrtext)
return attrtext, value
def get_extended_attribute(value):
""" [CFWS] 1*extended_attrtext [CFWS]
This is like the non-extended version except we allow % characters, so that
we can pick up an encoded value as a single string.
"""
# XXX: should we have an ExtendedAttribute TokenList?
attribute = Attribute()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
if value and value[0] in EXTENDED_ATTRIBUTE_ENDS:
raise errors.HeaderParseError(
"expected token but found '{}'".format(value))
token, value = get_extended_attrtext(value)
attribute.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
attribute.append(token)
return attribute, value
def get_section(value):
""" '*' digits
The formal BNF is more complicated because leading 0s are not allowed. We
check for that and add a defect. We also assume no CFWS is allowed between
the '*' and the digits, though the RFC is not crystal clear on that.
The caller should already have dealt with leading CFWS.
"""
section = Section()
if not value or value[0] != '*':
raise errors.HeaderParseError("Expected section but found {}".format(
value))
section.append(ValueTerminal('*', 'section-marker'))
value = value[1:]
if not value or not value[0].isdigit():
raise errors.HeaderParseError("Expected section number but "
"found {}".format(value))
digits = ''
while value and value[0].isdigit():
digits += value[0]
value = value[1:]
if digits[0] == '0' and digits != '0':
        section.defects.append(errors.InvalidHeaderDefect("section number "
            "has an invalid leading 0"))
section.number = int(digits)
section.append(ValueTerminal(digits, 'digits'))
return section, value
def get_value(value):
""" quoted-string / attribute
"""
v = Value()
if not value:
raise errors.HeaderParseError("Expected value but found end of string")
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
raise errors.HeaderParseError("Expected value but found "
"only {}".format(leader))
if value[0] == '"':
token, value = get_quoted_string(value)
else:
token, value = get_extended_attribute(value)
if leader is not None:
token[:0] = [leader]
v.append(token)
return v, value
def get_parameter(value):
""" attribute [section] ["*"] [CFWS] "=" value
The CFWS is implied by the RFC but not made explicit in the BNF. This
simplified form of the BNF from the RFC is made to conform with the RFC BNF
through some extra checks. We do it this way because it makes both error
recovery and working with the resulting parse tree easier.
"""
# It is possible CFWS would also be implicitly allowed between the section
    # and the 'extended-attribute' marker (the '*'), but we've never seen that
# in the wild and we will therefore ignore the possibility.
param = Parameter()
token, value = get_attribute(value)
param.append(token)
if not value or value[0] == ';':
param.defects.append(errors.InvalidHeaderDefect("Parameter contains "
"name ({}) but no value".format(token)))
return param, value
if value[0] == '*':
try:
token, value = get_section(value)
param.sectioned = True
param.append(token)
except errors.HeaderParseError:
pass
if not value:
raise errors.HeaderParseError("Incomplete parameter")
if value[0] == '*':
param.append(ValueTerminal('*', 'extended-parameter-marker'))
value = value[1:]
param.extended = True
if value[0] != '=':
raise errors.HeaderParseError("Parameter not followed by '='")
param.append(ValueTerminal('=', 'parameter-separator'))
value = value[1:]
leader = None
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
param.append(token)
remainder = None
appendto = param
if param.extended and value and value[0] == '"':
# Now for some serious hackery to handle the common invalid case of
# double quotes around an extended value. We also accept (with defect)
# a value marked as encoded that isn't really.
qstring, remainder = get_quoted_string(value)
inner_value = qstring.stripped_value
semi_valid = False
if param.section_number == 0:
if inner_value and inner_value[0] == "'":
semi_valid = True
else:
token, rest = get_attrtext(inner_value)
if rest and rest[0] == "'":
semi_valid = True
else:
try:
token, rest = get_extended_attrtext(inner_value)
            except errors.HeaderParseError:
pass
else:
if not rest:
semi_valid = True
if semi_valid:
param.defects.append(errors.InvalidHeaderDefect(
"Quoted string value for extended parameter is invalid"))
param.append(qstring)
for t in qstring:
if t.token_type == 'bare-quoted-string':
t[:] = []
appendto = t
break
value = inner_value
else:
remainder = None
param.defects.append(errors.InvalidHeaderDefect(
"Parameter marked as extended but appears to have a "
"quoted string value that is non-encoded"))
if value and value[0] == "'":
token = None
else:
token, value = get_value(value)
if not param.extended or param.section_number > 0:
if not value or value[0] != "'":
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
param.defects.append(errors.InvalidHeaderDefect(
"Apparent initial-extended-value but attribute "
"was not marked as extended or was not initial section"))
if not value:
# Assume the charset/lang is missing and the token is the value.
param.defects.append(errors.InvalidHeaderDefect(
"Missing required charset/lang delimiters"))
appendto.append(token)
if remainder is None:
return param, value
else:
if token is not None:
for t in token:
if t.token_type == 'extended-attrtext':
break
            t.token_type = 'attrtext'
appendto.append(t)
param.charset = t.value
if value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {!r}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if value and value[0] != "'":
token, value = get_attrtext(value)
appendto.append(token)
param.lang = token.value
if not value or value[0] != "'":
raise errors.HeaderParseError("Expected RFC2231 char/lang encoding "
"delimiter, but found {}".format(value))
appendto.append(ValueTerminal("'", 'RFC2231 delimiter'))
value = value[1:]
if remainder is not None:
# Treat the rest of value as bare quoted string content.
v = Value()
while value:
if value[0] in WSP:
token, value = get_fws(value)
else:
token, value = get_qcontent(value)
v.append(token)
token = v
else:
token, value = get_value(value)
appendto.append(token)
if remainder is not None:
assert not value, value
value = remainder
return param, value
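# For orientation (an illustration, not from the original source): an
# RFC 2231 extended, sectioned parameter such as
#
#     title*0*=us-ascii'en'This%20is
#
# goes through the code above with param.sectioned and param.extended set,
# the charset ('us-ascii') and language ('en') peeled off around the "'"
# delimiters, and the %-escaped value kept as a single extended-attrtext run
# to be decoded at a higher level.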
def parse_mime_parameters(value):
""" parameter *( ";" parameter )
That BNF is meant to indicate this routine should only be called after
finding and handling the leading ';'. There is no corresponding rule in
the formal RFC grammar, but it is more convenient for us for the set of
parameters to be treated as its own TokenList.
    This is a 'parse' routine because it consumes the remaining value, but it
would never be called to parse a full header. Instead it is called to
parse everything after the non-parameter value of a specific MIME header.
"""
mime_parameters = MimeParameters()
while value:
try:
token, value = get_parameter(value)
mime_parameters.append(token)
except errors.HeaderParseError as err:
leader = None
if value[0] in CFWS_LEADER:
leader, value = get_cfws(value)
if not value:
mime_parameters.append(leader)
return mime_parameters
if value[0] == ';':
if leader is not None:
mime_parameters.append(leader)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter entry with no content"))
else:
token, value = get_invalid_parameter(value)
if leader:
token[:0] = [leader]
mime_parameters.append(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"invalid parameter {!r}".format(token)))
if value and value[0] != ';':
# Junk after the otherwise valid parameter. Mark it as
# invalid, but it will have a value.
param = mime_parameters[-1]
param.token_type = 'invalid-parameter'
token, value = get_invalid_parameter(value)
param.extend(token)
mime_parameters.defects.append(errors.InvalidHeaderDefect(
"parameter with invalid trailing text {!r}".format(token)))
if value:
# Must be a ';' at this point.
mime_parameters.append(ValueTerminal(';', 'parameter-separator'))
value = value[1:]
return mime_parameters
def _find_mime_parameters(tokenlist, value):
"""Do our best to find the parameters in an invalid MIME header
"""
while value and value[0] != ';':
if value[0] in PHRASE_ENDS:
tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
tokenlist.append(token)
if not value:
return
tokenlist.append(ValueTerminal(';', 'parameter-separator'))
tokenlist.append(parse_mime_parameters(value[1:]))
def parse_content_type_header(value):
""" maintype "/" subtype *( ";" parameter )
    The maintype and subtype are tokens. Theoretically they could
be checked against the official IANA list + x-token, but we
don't do that.
"""
ctype = ContentType()
recover = False
if not value:
ctype.defects.append(errors.HeaderMissingRequiredValue(
"Missing content type specification"))
return ctype
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content maintype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
# XXX: If we really want to follow the formal grammar we should make
    # maintype and subtype specialized TokenLists here. Probably not worth it.
if not value or value[0] != '/':
ctype.defects.append(errors.InvalidHeaderDefect(
"Invalid content type"))
if value:
_find_mime_parameters(ctype, value)
return ctype
ctype.maintype = token.value.strip().lower()
ctype.append(ValueTerminal('/', 'content-type-separator'))
value = value[1:]
try:
token, value = get_token(value)
except errors.HeaderParseError:
ctype.defects.append(errors.InvalidHeaderDefect(
"Expected content subtype but found {!r}".format(value)))
_find_mime_parameters(ctype, value)
return ctype
ctype.append(token)
ctype.subtype = token.value.strip().lower()
if not value:
return ctype
if value[0] != ';':
ctype.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content type, but "
"found {!r}".format(value)))
# The RFC requires that a syntactically invalid content-type be treated
# as text/plain. Perhaps we should postel this, but we should probably
# only do that if we were checking the subtype value against IANA.
del ctype.maintype, ctype.subtype
_find_mime_parameters(ctype, value)
return ctype
ctype.append(ValueTerminal(';', 'parameter-separator'))
ctype.append(parse_mime_parameters(value[1:]))
return ctype
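# A minimal illustration of parse_content_type_header (assumed behavior,
# untested here):
#
#   >>> ctype = parse_content_type_header('text/plain; charset="utf-8"')
#   >>> ctype.maintype, ctype.subtype
#   ('text', 'plain')
#
# The parameters end up in a nested MimeParameters token appended after the
# 'parameter-separator' terminal.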
def parse_content_disposition_header(value):
""" disposition-type *( ";" parameter )
"""
disp_header = ContentDisposition()
if not value:
disp_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content disposition"))
return disp_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
disp_header.defects.append(errors.InvalidHeaderDefect(
"Expected content disposition but found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(token)
disp_header.content_disposition = token.value.strip().lower()
if not value:
return disp_header
if value[0] != ';':
disp_header.defects.append(errors.InvalidHeaderDefect(
"Only parameters are valid after content disposition, but "
"found {!r}".format(value)))
_find_mime_parameters(disp_header, value)
return disp_header
disp_header.append(ValueTerminal(';', 'parameter-separator'))
disp_header.append(parse_mime_parameters(value[1:]))
return disp_header
def parse_content_transfer_encoding_header(value):
""" mechanism
"""
# We should probably validate the values, since the list is fixed.
cte_header = ContentTransferEncoding()
if not value:
cte_header.defects.append(errors.HeaderMissingRequiredValue(
"Missing content transfer encoding"))
return cte_header
try:
token, value = get_token(value)
except errors.HeaderParseError:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Expected content transfer encoding but found {!r}".format(value)))
else:
cte_header.append(token)
cte_header.cte = token.value.strip().lower()
if not value:
return cte_header
while value:
cte_header.defects.append(errors.InvalidHeaderDefect(
"Extra text after content transfer encoding"))
if value[0] in PHRASE_ENDS:
cte_header.append(ValueTerminal(value[0], 'misplaced-special'))
value = value[1:]
else:
token, value = get_phrase(value)
cte_header.append(token)
return cte_header
|
yotchang4s/cafebabepy
|
src/main/python/email/_header_value_parser.py
|
Python
|
bsd-3-clause
| 105,218
|
[
"CRYSTAL"
] |
82b8bf3e3cd9c9a2fe9a123e17010b8ae0fef8e03fb3acc07ebc4ede20203f2b
|
import cStringIO
import numpy as np
import theano.tensor as T
from theano.tests import disturb_mem
import warnings
from pylearn2.costs.cost import Cost, SumOfCosts, DefaultDataSpecsMixin
from pylearn2.devtools.record import Record, RecordMode
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.models.model import Model
from pylearn2.monitor import Monitor
from pylearn2.space import CompositeSpace, Conv2DSpace, VectorSpace
from pylearn2.termination_criteria import EpochCounter
from pylearn2.testing.cost import CallbackCost, SumOfParams
from pylearn2.testing.datasets import ArangeDataset
from pylearn2.train import Train
from pylearn2.training_algorithms.sgd import (ExponentialDecay,
MomentumAdjustor,
PolyakAveraging,
LinearDecay,
LinearDecayOverEpoch,
MonitorBasedLRAdjuster,
SGD)
from pylearn2.utils.iteration import _iteration_schemes
from pylearn2.utils import safe_izip, safe_union, sharedX
class SupervisedDummyCost(DefaultDataSpecsMixin, Cost):
supervised = True
def expr(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
(X, Y) = data
return T.square(model(X) - Y).mean()
class DummyCost(DefaultDataSpecsMixin, Cost):
def expr(self, model, data):
space, sources = self.get_data_specs(model)
space.validate(data)
X = data
return T.square(model(X) - X).mean()
class DummyModel(Model):
def __init__(self, shapes, lr_scalers=None):
self._params = [sharedX(np.random.random(shape)) for shape in shapes]
self.input_space = VectorSpace(1)
self.lr_scalers = lr_scalers
def __call__(self, X):
# Implemented only so that DummyCost would work
return X
def get_lr_scalers(self):
if self.lr_scalers:
return dict(zip(self._params, self.lr_scalers))
else:
return dict()
class SoftmaxModel(Model):
"""A dummy model used for testing.
Important properties:
has a parameter (P) for SGD to act on
has a get_output_space method, so it can tell the
algorithm what kind of space the targets for supervised
learning live in
has a get_input_space method, so it can tell the
algorithm what kind of space the features live in
"""
def __init__(self, dim):
self.dim = dim
rng = np.random.RandomState([2012, 9, 25])
self.P = sharedX(rng.uniform(-1., 1., (dim, )))
def get_params(self):
return [self.P]
def get_input_space(self):
return VectorSpace(self.dim)
def get_output_space(self):
return VectorSpace(self.dim)
def __call__(self, X):
# Make the test fail if algorithm does not
# respect get_input_space
assert X.ndim == 2
# Multiplying by P ensures the shape as well
# as ndim is correct
return T.nnet.softmax(X*self.P)
class TopoSoftmaxModel(Model):
"""A dummy model used for testing.
Like SoftmaxModel but its features have 2 topological
dimensions. This tests that the training algorithm
will provide topological data correctly.
"""
def __init__(self, rows, cols, channels):
dim = rows * cols * channels
self.input_space = Conv2DSpace((rows, cols), channels)
self.dim = dim
rng = np.random.RandomState([2012, 9, 25])
self.P = sharedX(rng.uniform(-1., 1., (dim, )))
def get_params(self):
return [self.P]
def get_output_space(self):
return VectorSpace(self.dim)
def __call__(self, X):
# Make the test fail if algorithm does not
# respect get_input_space
assert X.ndim == 4
# Multiplying by P ensures the shape as well
# as ndim is correct
return T.nnet.softmax(X.reshape((X.shape[0], self.dim)) * self.P)
def test_sgd_unspec_num_mon_batch():
# tests that if you don't specify a number of
# monitoring batches, SGD configures the monitor
# to run on all the data
m = 25
visited = [False] * m
rng = np.random.RandomState([25, 9, 2012])
X = np.zeros((m, 1))
X[:, 0] = np.arange(m)
dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(1)
learning_rate = 1e-3
batch_size = 5
cost = DummyCost()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=None,
monitoring_dataset=dataset,
termination_criterion=None,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
monitor = Monitor.get_monitor(model)
X = T.matrix()
def tracker(*data):
X, = data
assert X.shape[1] == 1
for i in xrange(X.shape[0]):
visited[int(X[i, 0])] = True
monitor.add_channel(name='tracker',
ipt=X,
val=0.,
prereqs=[tracker],
data_specs=(model.get_input_space(),
model.get_input_source()))
monitor()
if False in visited:
print visited
assert False
def test_sgd_sup():
# tests that we can run the sgd algorithm
# on a supervised cost.
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m, ))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
# Including a monitoring dataset lets us test that
# the monitor works with supervised data
monitoring_dataset = DenseDesignMatrix(X=X, y=Y)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_sgd_unsup():
# tests that we can run the sgd algorithm
    # on an unsupervised cost.
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
# Including a monitoring dataset lets us test that
# the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = DummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def get_topological_dataset(rng, rows, cols, channels, m):
X = rng.randn(m, rows, cols, channels)
dim = rows * cols * channels
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
return DenseDesignMatrix(topo_view=X, y=Y)
def test_linear_decay():
# tests that the class LinearDecay in sgd.py
# gets the learning rate properly over the training batches
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at batch 'start'
# by an amount of 'step' specified below.
# the decrease of the learning rate should continue linearly until
# we reach batch 'saturate' at which the learning rate equals
# 'learning_rate * decay_factor'
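    # Worked example of that arithmetic, using the constants set below
    # (learning_rate=1e-1, start=5, saturate=10, decay_factor=0.1):
    # step = (0.1 - 0.01) / (10 - 5 + 1) = 0.015, so the rate is 0.1 through
    # batch 4, drops to 0.085 at batch 5, and falls by 0.015 per batch until
    # it reaches 0.01 at batch 10 and stays there.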
class LearningRateTracker(object):
def __init__(self):
self.lr_rates = []
def __call__(self, algorithm):
self.lr_rates.append(algorithm.learning_rate.get_value())
dim = 3
dataset_size = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(dataset_size, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
    # including a monitoring dataset lets us test that
    # the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
start = 5
saturate = 10
decay_factor = 0.1
linear_decay = LinearDecay(start=start, saturate=saturate,
decay_factor=decay_factor)
# including this extension for saving learning rate value after each batch
lr_tracker = LearningRateTracker()
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=[linear_decay, lr_tracker],
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
step = (learning_rate - learning_rate*decay_factor)/(saturate - start + 1)
num_batches = np.ceil(dataset_size / float(batch_size)).astype(int)
for i in xrange(epoch_num * num_batches):
actual = lr_tracker.lr_rates[i]
batches_seen = i + 1
if batches_seen < start:
expected = learning_rate
elif batches_seen >= saturate:
expected = learning_rate*decay_factor
elif (start <= batches_seen) and (batches_seen < saturate):
expected = (decay_factor * learning_rate +
(saturate - batches_seen) * step)
if not np.allclose(actual, expected):
raise AssertionError("After %d batches, expected learning rate to "
"be %f, but it is %f." %
(batches_seen, expected, actual))
def test_linear_decay_over_epoch():
# tests that the class LinearDecayOverEpoch in sgd.py
# gets the learning rate properly over the training epochs
# it runs a small softmax and at the end checks the learning values.
# the learning rates are expected to start changing at epoch 'start' by an
# amount of 'step' specified below.
# the decrease of the learning rate should continue linearly until we
# reach epoch 'saturate' at which the learning rate equals
# 'learning_rate * decay_factor'
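    # With the constants set below (learning_rate=1e-1, start=5, saturate=10,
    # decay_factor=0.1): step = (0.1 - 0.01) / (10 - 5 + 1) = 0.015, so at
    # epoch 7, for example, the expected rate is 0.01 + (10 - 7) * 0.015 =
    # 0.055.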
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
    # including a monitoring dataset lets us test that
    # the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(dim)
learning_rate = 1e-1
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 15
termination_criterion = EpochCounter(epoch_num)
cost = DummyCost()
algorithm = SGD(learning_rate, cost, batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
start = 5
saturate = 10
decay_factor = 0.1
linear_decay = LinearDecayOverEpoch(start=start,
saturate=saturate,
decay_factor=decay_factor)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[linear_decay])
train.main_loop()
lr = model.monitor.channels['learning_rate']
step = (learning_rate - learning_rate*decay_factor)/(saturate - start + 1)
for i in xrange(epoch_num + 1):
actual = lr.val_record[i]
if i < start:
expected = learning_rate
elif i >= saturate:
expected = learning_rate*decay_factor
elif (start <= i) and (i < saturate):
expected = decay_factor * learning_rate + (saturate - i) * step
if not np.allclose(actual, expected):
raise AssertionError("After %d epochs, expected learning rate to "
"be %f, but it is %f." %
(i, expected, actual))
def test_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py
# gets the learning rate properly over the training epochs
# it runs a small softmax and at the end checks the learning values. It
# runs 2 loops. Each loop evaluates one of the if clauses when checking
# the observation channels. Otherwise, longer training epochs are needed
# to observe both if and elif cases.
high_trigger = 1.0
shrink_amt = 0.99
low_trigger = 0.99
grow_amt = 1.01
min_lr = 1e-7
max_lr = 1.
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
dataset = DenseDesignMatrix(X=X)
m = 15
X = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 5
    # including a monitoring dataset lets us test that
    # the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
cost = DummyCost()
for i in xrange(2):
if i == 1:
high_trigger = 0.99
model = SoftmaxModel(dim)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
monitor_lr = MonitorBasedLRAdjuster(high_trigger=high_trigger,
shrink_amt=shrink_amt,
low_trigger=low_trigger,
grow_amt=grow_amt,
min_lr=min_lr,
max_lr=max_lr)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
train.main_loop()
v = model.monitor.channels['objective'].val_record
lr = model.monitor.channels['learning_rate'].val_record
lr_monitor = learning_rate
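        # Re-derive the expected schedule by hand: the rate shrinks whenever
        # the objective fails to drop below high_trigger times its previous
        # value, grows when it drops but by less than a factor of
        # low_trigger, and is then clipped to [min_lr, max_lr].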
for i in xrange(2, epoch_num + 1):
if v[i-1] > high_trigger * v[i-2]:
lr_monitor *= shrink_amt
elif v[i-1] > low_trigger * v[i-2]:
lr_monitor *= grow_amt
lr_monitor = max(min_lr, lr_monitor)
lr_monitor = min(max_lr, lr_monitor)
assert np.allclose(lr_monitor, lr[i])
def test_bad_monitoring_input_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py avoids wrong
# settings of channel_name or dataset_name in the constructor.
dim = 3
m = 10
rng = np.random.RandomState([06, 02, 2014])
X = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 2
dataset = DenseDesignMatrix(X=X)
    # including a monitoring dataset lets us test that
    # the monitor works with unsupervised data
monitoring_dataset = DenseDesignMatrix(X=X)
cost = DummyCost()
model = SoftmaxModel(dim)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
# testing for bad dataset_name input
dummy = 'void'
monitor_lr = MonitorBasedLRAdjuster(dataset_name=dummy)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except ValueError as e:
err_input = 'The dataset_name \'' + dummy + '\' is not valid.'
channel_name = dummy + '_objective'
err_message = ('There is no monitoring channel named \'' +
channel_name +
'\'. You probably need to specify a valid monitoring '
'channel by using either dataset_name or channel_name '
'in the MonitorBasedLRAdjuster constructor. ' +
err_input)
assert err_message == str(e)
except:
raise AssertionError("MonitorBasedLRAdjuster takes dataset_name that "
"is invalid ")
# testing for bad channel_name input
monitor_lr2 = MonitorBasedLRAdjuster(channel_name=dummy)
model2 = SoftmaxModel(dim)
train2 = Train(dataset,
model2,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr2])
try:
train2.main_loop()
except ValueError as e:
err_input = 'The channel_name \'' + dummy + '\' is not valid.'
err_message = ('There is no monitoring channel named \'' + dummy +
'\'. You probably need to specify a valid monitoring '
'channel by using either dataset_name or channel_name '
'in the MonitorBasedLRAdjuster constructor. ' +
err_input)
assert err_message == str(e)
except:
raise AssertionError("MonitorBasedLRAdjuster takes channel_name that "
"is invalid ")
return
def testing_multiple_datasets_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py does not take
# multiple datasets in which multiple channels ending in '_objective'
# exist.
# This case happens when the user has not specified either channel_name or
# dataset_name in the constructor
dim = 3
m = 10
rng = np.random.RandomState([06, 02, 2014])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 1
    # including monitoring datasets lets us test that
    # the monitor works with unsupervised data
monitoring_train = DenseDesignMatrix(X=X)
monitoring_test = DenseDesignMatrix(X=Y)
cost = DummyCost()
model = SoftmaxModel(dim)
dataset = DenseDesignMatrix(X=X)
termination_criterion = EpochCounter(epoch_num)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset={'train': monitoring_train,
'test': monitoring_test},
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
monitor_lr = MonitorBasedLRAdjuster()
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
try:
train.main_loop()
except ValueError:
return
raise AssertionError("MonitorBasedLRAdjuster takes multiple dataset names "
"in which more than one \"objective\" channel exist "
"and the user has not specified either channel_name "
"or database_name in the constructor to "
"disambiguate.")
def testing_multiple_datasets_with_specified_dataset_in_monitor_based_lr():
# tests that the class MonitorBasedLRAdjuster in sgd.py can properly use
    # the specified dataset_name in the constructor when multiple datasets
# exist.
dim = 3
m = 10
rng = np.random.RandomState([06, 02, 2014])
X = rng.randn(m, dim)
Y = rng.randn(m, dim)
learning_rate = 1e-2
batch_size = 5
# We need to include this so the test actually stops running at some point
epoch_num = 1
    # including monitoring datasets lets us test that
    # the monitor works with unsupervised data
monitoring_train = DenseDesignMatrix(X=X)
monitoring_test = DenseDesignMatrix(X=Y)
cost = DummyCost()
model = SoftmaxModel(dim)
dataset = DenseDesignMatrix(X=X)
termination_criterion = EpochCounter(epoch_num)
monitoring_dataset = {'train': monitoring_train, 'test': monitoring_test}
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=2,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
dataset_name = monitoring_dataset.keys()[0]
monitor_lr = MonitorBasedLRAdjuster(dataset_name=dataset_name)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=[monitor_lr])
train.main_loop()
def test_sgd_topo():
# tests that we can run the sgd algorithm
# on data with topology
# does not test for correctness at all, just
# that the algorithm runs without dying
rows = 3
cols = 4
channels = 2
dim = rows * cols * channels
m = 10
rng = np.random.RandomState([25, 9, 2012])
dataset = get_topological_dataset(rng, rows, cols, channels, m)
    # including a monitoring dataset lets us test that
    # the monitor works with supervised data
m = 15
monitoring_dataset = get_topological_dataset(rng, rows, cols, channels, m)
model = TopoSoftmaxModel(rows, cols, channels)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=monitoring_dataset,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_sgd_no_mon():
# tests that we can run the sgd algorithm
# without a monitoring dataset
# does not test for correctness at all, just
# that the algorithm runs without dying
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_reject_mon_batch_without_mon():
# tests that setting up the sgd algorithm
# without a monitoring dataset
# but with monitoring_batches specified is an error
dim = 3
m = 10
rng = np.random.RandomState([25, 9, 2012])
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m,))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
dataset = DenseDesignMatrix(X=X, y=Y)
m = 15
X = rng.randn(m, dim)
idx = rng.randint(0, dim, (m, ))
Y = np.zeros((m, dim))
for i in xrange(m):
Y[i, idx[i]] = 1
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
cost = SupervisedDummyCost()
try:
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
monitoring_batches=3,
monitoring_dataset=None,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
except ValueError:
return
assert False
def test_sgd_sequential():
# tests that requesting train_iteration_mode = 'sequential'
# works
dim = 1
batch_size = 3
m = 5 * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
visited = [False] * m
def visit(X):
assert X.shape[1] == 1
assert np.all(X[1:] == X[0:-1]+1)
start = int(X[0, 0])
if start > 0:
assert visited[start - 1]
for i in xrange(batch_size):
assert not visited[start+i]
visited[start+i] = 1
data_specs = (model.get_input_space(), model.get_input_source())
cost = CallbackCost(visit, data_specs)
# We need to include this so the test actually stops running at some point
termination_criterion = EpochCounter(5)
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode='sequential',
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
algorithm.train(dataset)
assert all(visited)
def test_determinism():
# Verifies that running SGD twice results in the same examples getting
# visited in the same order
for mode in _iteration_schemes:
dim = 1
batch_size = 3
num_batches = 5
m = num_batches * batch_size
dataset = ArangeDataset(m)
model = SoftmaxModel(dim)
learning_rate = 1e-3
batch_size = 5
visited = [[-1] * m]
def visit(X):
mx = max(visited[0])
counter = mx + 1
for i in X[:, 0]:
i = int(i)
assert visited[0][i] == -1
visited[0][i] = counter
counter += 1
data_specs = (model.get_input_space(), model.get_input_source())
cost = CallbackCost(visit, data_specs)
# We need to include this so the test actually stops running at some
# point
termination_criterion = EpochCounter(5)
def run_algorithm():
unsupported_modes = ['random_slice', 'random_uniform']
algorithm = SGD(learning_rate,
cost,
batch_size=batch_size,
train_iteration_mode=mode,
monitoring_dataset=None,
termination_criterion=termination_criterion,
update_callbacks=None,
init_momentum=None,
set_batch_size=False)
algorithm.setup(dataset=dataset, model=model)
raised = False
try:
algorithm.train(dataset)
except ValueError:
print mode
assert mode in unsupported_modes
raised = True
if mode in unsupported_modes:
assert raised
return True
return False
if run_algorithm():
continue
visited.insert(0, [-1] * m)
del model.monitor
run_algorithm()
for v in visited:
assert len(v) == m
for elem in range(m):
assert elem in v
assert len(visited) == 2
print visited[0]
print visited[1]
assert np.all(np.asarray(visited[0]) == np.asarray(visited[1]))
def test_determinism_2():
"""
A more aggressive determinism test. Tests that apply nodes are all passed
inputs with the same md5sums, apply nodes are run in same order, etc. Uses
disturb_mem to try to cause dictionaries to iterate in different orders,
etc.
"""
def run_sgd(mode):
# Must be seeded the same both times run_sgd is called
disturb_mem.disturb_mem()
rng = np.random.RandomState([2012, 11, 27])
batch_size = 5
train_batches = 3
valid_batches = 4
num_features = 2
# Synthesize dataset with a linear decision boundary
w = rng.randn(num_features)
def make_dataset(num_batches):
disturb_mem.disturb_mem()
m = num_batches*batch_size
X = rng.randn(m, num_features)
y = np.zeros((m, 1))
y[:, 0] = np.dot(X, w) > 0.
rval = DenseDesignMatrix(X=X, y=y)
rval.yaml_src = "" # suppress no yaml_src warning
X = rval.get_batch_design(batch_size)
assert X.shape == (batch_size, num_features)
return rval
train = make_dataset(train_batches)
valid = make_dataset(valid_batches)
num_chunks = 10
chunk_width = 2
class ManyParamsModel(Model):
"""
Make a model with lots of parameters, so that there are many
opportunities for their updates to get accidentally re-ordered
non-deterministically. This makes non-determinism bugs manifest
more frequently.
"""
def __init__(self):
self.W1 = [sharedX(rng.randn(num_features, chunk_width)) for i
in xrange(num_chunks)]
disturb_mem.disturb_mem()
self.W2 = [sharedX(rng.randn(chunk_width))
for i in xrange(num_chunks)]
self._params = safe_union(self.W1, self.W2)
self.input_space = VectorSpace(num_features)
self.output_space = VectorSpace(1)
disturb_mem.disturb_mem()
model = ManyParamsModel()
disturb_mem.disturb_mem()
class LotsOfSummingCost(Cost):
"""
Make a cost whose gradient on the parameters involves summing many
terms together, so that T.grad is more likely to sum things in a
random order.
"""
supervised = True
def expr(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)
X, Y = data
disturb_mem.disturb_mem()
def mlp_pred(non_linearity):
Z = [T.dot(X, W) for W in model.W1]
H = map(non_linearity, Z)
Z = [T.dot(h, W) for h, W in safe_izip(H, model.W2)]
pred = sum(Z)
return pred
nonlinearity_predictions = map(mlp_pred,
[T.nnet.sigmoid,
T.nnet.softplus,
T.sqr,
T.sin])
pred = sum(nonlinearity_predictions)
disturb_mem.disturb_mem()
return abs(pred-Y[:, 0]).sum()
def get_data_specs(self, model):
data = CompositeSpace((model.get_input_space(),
model.get_output_space()))
source = (model.get_input_source(), model.get_target_source())
return (data, source)
cost = LotsOfSummingCost()
disturb_mem.disturb_mem()
algorithm = SGD(cost=cost,
batch_size=batch_size,
init_momentum=.5,
learning_rate=1e-3,
monitoring_dataset={'train': train, 'valid': valid},
update_callbacks=[ExponentialDecay(decay_factor=2.,
min_lr=.0001)],
termination_criterion=EpochCounter(max_epochs=5))
disturb_mem.disturb_mem()
train_object = Train(dataset=train,
model=model,
algorithm=algorithm,
extensions=[PolyakAveraging(start=0),
MomentumAdjustor(final_momentum=.9,
start=1,
saturate=5), ],
save_freq=0)
disturb_mem.disturb_mem()
train_object.main_loop()
output = cStringIO.StringIO()
record = Record(file_object=output, replay=False)
record_mode = RecordMode(record)
run_sgd(record_mode)
output = cStringIO.StringIO(output.getvalue())
playback = Record(file_object=output, replay=True)
playback_mode = RecordMode(playback)
run_sgd(playback_mode)
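# The two run_sgd calls above exercise a record/replay pattern: the first
# run records every Theano computation via RecordMode to `output`, and the
# second run replays that record, raising an error if the executed
# computations differ, which would indicate nondeterminism.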
def test_lr_scalers():
"""
Tests that SGD respects Model.get_lr_scalers
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
learning_rate = .001
class ModelWithScalers(Model):
def __init__(self):
self._params = [sharedX(np.zeros(shape)) for shape in shapes]
self.input_space = VectorSpace(1)
def __call__(self, X):
# Implemented only so that DummyCost would work
return X
def get_lr_scalers(self):
return dict(zip(self._params, scales))
model = ModelWithScalers()
dataset = ArangeDataset(1)
sgd = SGD(cost=cost,
learning_rate=learning_rate,
init_momentum=0.,
batch_size=1)
sgd.setup(model=model, dataset=dataset)
manual = [param.get_value() for param in model.get_params()]
manual = [param - learning_rate * scale for param, scale in
zip(manual, scales)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
manual = [param - learning_rate * scale
for param, scale
in zip(manual, scales)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
def test_lr_scalers_momentum():
"""
Tests that SGD respects Model.get_lr_scalers when using
momentum.
"""
# We include a cost other than SumOfParams so that data is actually
# queried from the training set, and the expected number of updates
# are applied.
cost = SumOfCosts([SumOfParams(), (0., DummyCost())])
scales = [.01, .02, .05, 1., 5.]
shapes = [(1,), (9,), (8, 7), (6, 5, 4), (3, 2, 2, 2)]
model = DummyModel(shapes, lr_scalers=scales)
dataset = ArangeDataset(1)
learning_rate = .001
momentum = 0.5
sgd = SGD(cost=cost,
learning_rate=learning_rate,
init_momentum=momentum,
batch_size=1)
sgd.setup(model=model, dataset=dataset)
manual = [param.get_value() for param in model.get_params()]
inc = [-learning_rate * scale for param, scale in zip(manual, scales)]
manual = [param + i for param, i in zip(manual, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
manual = [param - learning_rate * scale + i * momentum
for param, scale, i in
zip(manual, scales, inc)]
sgd.train(dataset=dataset)
assert all(np.allclose(manual_param, sgd_param.get_value())
for manual_param, sgd_param
in zip(manual, model.get_params()))
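# For reference, the parameter updates verified in the two tests above
# follow the classic momentum SGD rule with per-parameter learning rate
# scaling (a sketch of the math being checked, not pylearn2 API):
#
#   inc   := momentum * inc - learning_rate * scale * grad
#   param := param + inc
#
# With the SumOfParams cost the gradient is 1 for every parameter element,
# which is why the expected values can be computed by hand in `manual`.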
def test_batch_size_specialization():
# Tests that using a batch size of 1 for training and a batch size
# other than 1 for monitoring does not result in a crash.
# This catches a bug reported in the pylearn-dev@googlegroups.com
# e-mail "[pylearn-dev] monitor assertion error: channel_X.type != X.type"
# The training data was specialized to a row matrix (theano tensor with
# first dim broadcastable) and the monitor ended up with expressions
# mixing the specialized and non-specialized version of the expression.
m = 2
rng = np.random.RandomState([25, 9, 2012])
X = np.zeros((m, 1))
dataset = DenseDesignMatrix(X=X)
model = SoftmaxModel(1)
learning_rate = 1e-3
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=1,
monitoring_batches=1,
monitoring_dataset=dataset,
termination_criterion=EpochCounter(max_epochs=1),
update_callbacks=None,
set_batch_size=False)
train = Train(dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
def test_uneven_batch_size():
"""
Extensively tests SGD parametrizations for datasets with a number of
examples not divisible by the batch size.
The tested settings are:
- Model with force_batch_size set or not
- Training dataset with a number of examples divisible or not by batch size
- Monitoring dataset with a number of examples divisible or not by batch size
- Even or uneven iterators
2 of the 10 runs should raise ValueError.
"""
learning_rate = 1e-3
batch_size = 5
dim = 3
m1, m2, m3 = 10, 15, 22
rng = np.random.RandomState([25, 9, 2012])
dataset1 = DenseDesignMatrix(X=rng.randn(m1, dim))
dataset2 = DenseDesignMatrix(X=rng.randn(m2, dim))
dataset3 = DenseDesignMatrix(X=rng.randn(m3, dim))
def train_with_monitoring_datasets(train_dataset,
monitoring_datasets,
model_force_batch_size,
train_iteration_mode,
monitor_iteration_mode):
model = SoftmaxModel(dim)
if model_force_batch_size:
model.force_batch_size = model_force_batch_size
cost = DummyCost()
algorithm = SGD(learning_rate, cost,
batch_size=batch_size,
train_iteration_mode=train_iteration_mode,
monitor_iteration_mode=monitor_iteration_mode,
monitoring_dataset=monitoring_datasets,
termination_criterion=EpochCounter(2))
train = Train(train_dataset,
model,
algorithm,
save_path=None,
save_freq=0,
extensions=None)
train.main_loop()
no_monitoring_datasets = None
even_monitoring_datasets = {'valid': dataset2}
uneven_monitoring_datasets = {'valid': dataset2, 'test': dataset3}
# without monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven training datasets
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except ValueError:
pass
train_with_monitoring_datasets(
train_dataset=dataset3,
monitoring_datasets=no_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='even_sequential',
monitor_iteration_mode='sequential')
# with even monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=even_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
# with uneven monitoring datasets
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=False,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
try:
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='sequential')
assert False
except ValueError:
pass
train_with_monitoring_datasets(
train_dataset=dataset1,
monitoring_datasets=uneven_monitoring_datasets,
model_force_batch_size=batch_size,
train_iteration_mode='sequential',
monitor_iteration_mode='even_sequential')
if __name__ == '__main__':
test_monitor_based_lr()
|
KennethPierce/pylearnk
|
pylearn2/training_algorithms/tests/test_sgd.py
|
Python
|
bsd-3-clause
| 46,250
|
[
"VisIt"
] |
0d4c4369a451d318b06d1c63e88cd0ca79941d04a5171582fb8126a572887cd5
|
# Copyright 2007 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Code to work with the sprotXX.dat file from SwissProt.
http://www.expasy.ch/sprot/sprot-top.html
Tested with:
Release 56.9, 03-March-2009.
Classes:
- Record Holds SwissProt data.
- Reference Holds reference data from a SwissProt record.
Functions:
- read Read one SwissProt record
- parse Read multiple SwissProt records
"""
from __future__ import print_function
from Bio._py3k import _as_string
__docformat__ = "restructuredtext en"
class Record(object):
"""Holds information from a SwissProt record.
Members:
- entry_name Name of this entry, e.g. RL1_ECOLI.
- data_class Either 'STANDARD' or 'PRELIMINARY'.
- molecule_type Type of molecule, 'PRT',
- sequence_length Number of residues.
- accessions List of the accession numbers, e.g. ['P00321']
- created A tuple of (date, release).
- sequence_update A tuple of (date, release).
- annotation_update A tuple of (date, release).
- description Free-format description.
- gene_name Gene name. See userman.txt for description.
- organism The source of the sequence.
- organelle The origin of the sequence.
- organism_classification The taxonomy classification. List of strings.
(http://www.ncbi.nlm.nih.gov/Taxonomy/)
- taxonomy_id A list of NCBI taxonomy id's.
- host_organism A list of names of the hosts of a virus, if any.
- host_taxonomy_id A list of NCBI taxonomy id's of the hosts, if any.
- references List of Reference objects.
- comments List of strings.
- cross_references List of tuples (db, id1[, id2][, id3]). See the docs.
- keywords List of the keywords.
- features List of tuples (key name, from, to, description).
from and to can be either integers for the residue
numbers, '<', '>', or '?'
- seqinfo tuple of (length, molecular weight, CRC32 value)
- sequence The sequence.
"""
def __init__(self):
self.entry_name = None
self.data_class = None
self.molecule_type = None
self.sequence_length = None
self.accessions = []
self.created = None
self.sequence_update = None
self.annotation_update = None
self.description = []
self.gene_name = ''
self.organism = []
self.organelle = ''
self.organism_classification = []
self.taxonomy_id = []
self.host_organism = []
self.host_taxonomy_id = []
self.references = []
self.comments = []
self.cross_references = []
self.keywords = []
self.features = []
self.seqinfo = None
self.sequence = ''
class Reference(object):
"""Holds information from one reference in a SwissProt entry.
Members:
number Number of reference in an entry.
positions Describes extent of work. List of strings.
comments Comments. List of (token, text).
references References. List of (dbname, identifier).
authors The authors of the work.
title Title of the work.
location A citation for the work.
"""
def __init__(self):
self.number = None
self.positions = []
self.comments = []
self.references = []
self.authors = []
self.title = []
self.location = []
def parse(handle):
while True:
record = _read(handle)
if not record:
return
yield record
def read(handle):
record = _read(handle)
if not record:
raise ValueError("No SwissProt record found")
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one SwissProt record found")
return record
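# A minimal usage sketch (the file name is illustrative, not part of this
# module):
#
#     with open("uniprot_sprot.dat") as handle:
#         for record in parse(handle):
#             print(record.entry_name, len(record.sequence))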
# Everything below is considered private
def _read(handle):
record = None
unread = ""
for line in handle:
# This is for Python 3 to cope with a binary handle (byte strings),
# or a text handle (unicode strings):
line = _as_string(line)
key, value = line[:2], line[5:].rstrip()
if unread:
value = unread + " " + value
unread = ""
if key == '**':
# See Bug 2353, some files from the EBI have extra lines
# starting "**" (two asterisks/stars). They appear
# to be unofficial automated annotations. e.g.
# **
# ** ################# INTERNAL SECTION ##################
# **HA SAM; Annotated by PicoHamap 1.88; MF_01138.1; 09-NOV-2003.
pass
elif key == 'ID':
record = Record()
_read_id(record, line)
_sequence_lines = []
elif key == 'AC':
accessions = [word for word in value.rstrip(";").split("; ")]
record.accessions.extend(accessions)
elif key == 'DT':
_read_dt(record, line)
elif key == 'DE':
record.description.append(value.strip())
elif key == 'GN':
if record.gene_name:
record.gene_name += " "
record.gene_name += value
elif key == 'OS':
record.organism.append(value)
elif key == 'OG':
record.organelle += line[5:]
elif key == 'OC':
cols = [col for col in value.rstrip(";.").split("; ")]
record.organism_classification.extend(cols)
elif key == 'OX':
_read_ox(record, line)
elif key == 'OH':
_read_oh(record, line)
elif key == 'RN':
reference = Reference()
_read_rn(reference, value)
record.references.append(reference)
elif key == 'RP':
assert record.references, "RP: missing RN"
record.references[-1].positions.append(value)
elif key == 'RC':
assert record.references, "RC: missing RN"
reference = record.references[-1]
unread = _read_rc(reference, value)
elif key == 'RX':
assert record.references, "RX: missing RN"
reference = record.references[-1]
_read_rx(reference, value)
elif key == 'RL':
assert record.references, "RL: missing RN"
reference = record.references[-1]
reference.location.append(value)
# In UniProt release 1.12 of 6/21/04, there is a new RG
# (Reference Group) line, which references a group instead of
# an author. Each block must have at least 1 RA or RG line.
elif key == 'RA':
assert record.references, "RA: missing RN"
reference = record.references[-1]
reference.authors.append(value)
elif key == 'RG':
assert record.references, "RG: missing RN"
reference = record.references[-1]
reference.authors.append(value)
elif key == "RT":
assert record.references, "RT: missing RN"
reference = record.references[-1]
reference.title.append(value)
elif key == 'CC':
_read_cc(record, line)
elif key == 'DR':
_read_dr(record, value)
elif key == 'PE':
# TODO - Record this information?
pass
elif key == 'KW':
_read_kw(record, value)
elif key == 'FT':
_read_ft(record, line)
elif key == 'SQ':
cols = value.split()
assert len(cols) == 7, "I don't understand SQ line %s" % line
# Do more checking here?
record.seqinfo = int(cols[1]), int(cols[3]), cols[5]
elif key == ' ':
_sequence_lines.append(value.replace(" ", "").rstrip())
elif key == '//':
# Join multiline data into one string
record.description = " ".join(record.description)
record.organism = " ".join(record.organism)
record.organelle = record.organelle.rstrip()
for reference in record.references:
reference.authors = " ".join(reference.authors).rstrip(";")
reference.title = " ".join(reference.title).rstrip(";")
if reference.title.startswith('"') and reference.title.endswith('"'):
reference.title = reference.title[1:-1] # remove quotes
reference.location = " ".join(reference.location)
record.sequence = "".join(_sequence_lines)
return record
else:
raise ValueError("Unknown keyword '%s' found" % key)
if record:
raise ValueError("Unexpected end of stream.")
def _read_id(record, line):
cols = line[5:].split()
# Prior to release 51, included with MoleculeType:
# ID EntryName DataClass; MoleculeType; SequenceLength AA.
#
# Newer files lack the MoleculeType:
# ID EntryName DataClass; SequenceLength AA.
if len(cols) == 5:
record.entry_name = cols[0]
record.data_class = cols[1].rstrip(";")
record.molecule_type = cols[2].rstrip(";")
record.sequence_length = int(cols[3])
elif len(cols) == 4:
record.entry_name = cols[0]
record.data_class = cols[1].rstrip(";")
record.molecule_type = None
record.sequence_length = int(cols[2])
else:
raise ValueError("ID line has unrecognised format:\n" + line)
# check if the data class is one of the allowed values
allowed = ('STANDARD', 'PRELIMINARY', 'IPI', 'Reviewed', 'Unreviewed')
if record.data_class not in allowed:
raise ValueError("Unrecognized data class %s in line\n%s" %
(record.data_class, line))
# molecule_type should be 'PRT' for PRoTein
# Note that has been removed in recent releases (set to None)
if record.molecule_type not in (None, 'PRT'):
raise ValueError("Unrecognized molecule type %s in line\n%s" %
(record.molecule_type, line))
def _read_dt(record, line):
value = line[5:]
uprline = value.upper()
cols = value.rstrip().split()
if 'CREATED' in uprline \
or 'LAST SEQUENCE UPDATE' in uprline \
or 'LAST ANNOTATION UPDATE' in uprline:
# Old style DT line
# =================
# e.g.
# DT 01-FEB-1995 (Rel. 31, Created)
# DT 01-FEB-1995 (Rel. 31, Last sequence update)
# DT 01-OCT-2000 (Rel. 40, Last annotation update)
#
# or:
# DT 08-JAN-2002 (IPI Human rel. 2.3, Created)
# ...
# find where the version information will be located
# This is needed for when you have cases like IPI where
# the release version is in a different spot:
# DT 08-JAN-2002 (IPI Human rel. 2.3, Created)
uprcols = uprline.split()
rel_index = -1
for index in range(len(uprcols)):
if 'REL.' in uprcols[index]:
rel_index = index
assert rel_index >= 0, \
"Could not find Rel. in DT line: %s" % line
version_index = rel_index + 1
# get the version information
str_version = cols[version_index].rstrip(",")
# no version number
if str_version == '':
version = 0
# dot versioned
elif '.' in str_version:
version = str_version
# integer versioned
else:
version = int(str_version)
date = cols[0]
if 'CREATED' in uprline:
record.created = date, version
elif 'LAST SEQUENCE UPDATE' in uprline:
record.sequence_update = date, version
elif 'LAST ANNOTATION UPDATE' in uprline:
record.annotation_update = date, version
else:
assert False, "Shouldn't reach this line!"
elif 'INTEGRATED INTO' in uprline \
or 'SEQUENCE VERSION' in uprline \
or 'ENTRY VERSION' in uprline:
# New style DT line
# =================
# As of UniProt Knowledgebase release 7.0 (including
# Swiss-Prot release 49.0 and TrEMBL release 32.0) the
# format of the DT lines and the version information
# in them was changed - the release number was dropped.
#
# For more information see bug 1948 and
# http://ca.expasy.org/sprot/relnotes/sp_news.html#rel7.0
#
# e.g.
# DT 01-JAN-1998, integrated into UniProtKB/Swiss-Prot.
# DT 15-OCT-2001, sequence version 3.
# DT 01-APR-2004, entry version 14.
#
# This is a new style DT line...
# The date should be in string cols[1]
# Get the version number if there is one.
# For the three DT lines above: 0, 3, 14
try:
version = int(cols[-1])
except ValueError:
version = 0
date = cols[0].rstrip(",")
# Re-use the historical property names, even though
# the meaning has changed slightly:
if "INTEGRATED" in uprline:
record.created = date, version
elif 'SEQUENCE VERSION' in uprline:
record.sequence_update = date, version
elif 'ENTRY VERSION' in uprline:
record.annotation_update = date, version
else:
assert False, "Shouldn't reach this line!"
else:
raise ValueError("I don't understand the date line %s" % line)
def _read_ox(record, line):
# The OX line used to be in the simple format:
# OX DESCRIPTION=ID[, ID]...;
# If there are too many id's to fit onto a line, then the ID's
# continue directly onto the next line, e.g.
# OX DESCRIPTION=ID[, ID]...
# OX ID[, ID]...;
# Currently, the description is always "NCBI_TaxID".
# To parse this, I need to check to see whether I'm at the
# first line. If I am, grab the description and make sure
# it's an NCBI ID. Then, grab all the id's.
#
# As of the 2014-10-01 release, there may be an evidence code, e.g.
# OX NCBI_TaxID=418404 {ECO:0000313|EMBL:AEX14553.1};
# In the short term, we will ignore any evidence codes:
line = line.split('{')[0]
if record.taxonomy_id:
ids = line[5:].rstrip().rstrip(";")
else:
descr, ids = line[5:].rstrip().rstrip(";").split("=")
assert descr == "NCBI_TaxID", "Unexpected taxonomy type %s" % descr
record.taxonomy_id.extend(ids.split(', '))
def _read_oh(record, line):
# Line type OH (Organism Host) for viral hosts
assert line[5:].startswith("NCBI_TaxID="), "Unexpected %s" % line
line = line[16:].rstrip()
assert line[-1] == "." and line.count(";") == 1, line
taxid, name = line[:-1].split(";")
record.host_taxonomy_id.append(taxid.strip())
record.host_organism.append(name.strip())
def _read_rn(reference, rn):
# This used to be a very simple line with a reference number, e.g.
# RN [1]
# As of the 2014-10-01 release, there may be an evidence code, e.g.
# RN [1] {ECO:0000313|EMBL:AEX14553.1}
# We will for now ignore this
rn = rn.split()[0]
assert rn[0] == '[' and rn[-1] == ']', "Missing brackets %s" % rn
reference.number = int(rn[1:-1])
def _read_rc(reference, value):
cols = value.split(';')
if value[-1] == ';':
unread = ""
else:
cols, unread = cols[:-1], cols[-1]
for col in cols:
if not col: # last column will be the empty string
return
# The token is everything before the first '=' character.
i = col.find("=")
if i >= 0:
token, text = col[:i], col[i + 1:]
comment = token.lstrip(), text
reference.comments.append(comment)
else:
comment = reference.comments[-1]
comment = "%s %s" % (comment, col)
reference.comments[-1] = comment
return unread
def _read_rx(reference, value):
# The basic (older?) RX line is of the form:
# RX MEDLINE; 85132727.
# but there are variants of this that need to be dealt with (see below)
# CLD1_HUMAN in Release 39 and DADR_DIDMA in Release 33
# have extraneous information in the RX line. Check for
# this and chop it out of the line.
# (noticed by katel@worldpath.net)
value = value.replace(' [NCBI, ExPASy, Israel, Japan]', '')
# RX lines can also be of the form
# RX PubMed=9603189;
# reported by edvard@farmasi.uit.no
# and these can be more complicated like:
# RX MEDLINE=95385798; PubMed=7656980;
# RX PubMed=15060122; DOI=10.1136/jmg 2003.012781;
# We look for these cases first and deal with them
warn = False
if "=" in value:
cols = value.split("; ")
cols = [x.strip() for x in cols]
cols = [x for x in cols if x]
for col in cols:
x = col.split("=")
if len(x) != 2 or x == ["DOI", "DOI"]:  # split() returns a list
warn = True
break
assert len(x) == 2, "I don't understand RX line %s" % value
reference.references.append((x[0], x[1].rstrip(";")))
# otherwise we assume we have the type 'RX MEDLINE; 85132727.'
else:
cols = value.split("; ")
# normally we split into two parts
if len(cols) != 2:
warn = True
else:
reference.references.append((cols[0].rstrip(";"), cols[1].rstrip(".")))
if warn:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Possibly corrupt RX line %r" % value,
BiopythonParserWarning)
def _read_cc(record, line):
key, value = line[5:8], line[9:].rstrip()
if key == '-!-': # Make a new comment
record.comments.append(value)
elif key == ' ': # add to the previous comment
if not record.comments:
# TCMO_STRGA in Release 37 has comment with no topic
record.comments.append(value)
else:
record.comments[-1] += " " + value
def _read_dr(record, value):
cols = value.rstrip(".").split('; ')
record.cross_references.append(tuple(cols))
def _read_kw(record, value):
# Old style - semi-colon separated, multi-line. e.g. Q13639.txt
# KW Alternative splicing; Cell membrane; Complete proteome;
# KW Disulfide bond; Endosome; G-protein coupled receptor; Glycoprotein;
# KW Lipoprotein; Membrane; Palmitate; Polymorphism; Receptor; Transducer;
# KW Transmembrane.
#
# New style as of 2014-10-01 release with evidence codes, e.g. H2CNN8.txt
# KW Monooxygenase {ECO:0000313|EMBL:AEX14553.1};
# KW Oxidoreductase {ECO:0000313|EMBL:AEX14553.1}.
# For now to match the XML parser, drop the evidence codes.
for value in value.rstrip(";.").split('; '):
if value.endswith("}"):
# Discard the evidence code
value = value.rsplit("{", 1)[0]
record.keywords.append(value.strip())
def _read_ft(record, line):
line = line[5:] # get rid of junk in front
name = line[0:8].rstrip()
try:
from_res = int(line[9:15])
except ValueError:
from_res = line[9:15].lstrip()
try:
to_res = int(line[16:22])
except ValueError:
to_res = line[16:22].lstrip()
# if there is a feature_id (FTId), store it away
if line[29:35] == r"/FTId=":
ft_id = line[35:70].rstrip()[:-1]
description = ""
else:
ft_id = ""
description = line[29:70].rstrip()
if not name: # is continuation of last one
assert not from_res and not to_res
name, from_res, to_res, old_description, old_ft_id = record.features[-1]
del record.features[-1]
description = ("%s %s" % (old_description, description)).strip()
# special case -- VARSPLIC, reported by edvard@farmasi.uit.no
if name == "VARSPLIC":
# Remove unwanted spaces in sequences.
# During line carryover, the sequences in VARSPLIC can get mangled
# with unwanted spaces like:
# 'DISSTKLQALPSHGLESIQT -> PCRATGWSPFRRSSPC LPTH'
# We want to check for this case and correct it as it happens.
descr_cols = description.split(" -> ")
if len(descr_cols) == 2:
first_seq, second_seq = descr_cols
extra_info = ''
# we might have more information at the end of the
# second sequence, which should be in parenthesis
extra_info_pos = second_seq.find(" (")
if extra_info_pos != -1:
extra_info = second_seq[extra_info_pos:]
second_seq = second_seq[:extra_info_pos]
# now clean spaces out of the first and second string
first_seq = first_seq.replace(" ", "")
second_seq = second_seq.replace(" ", "")
# reassemble the description
description = first_seq + " -> " + second_seq + extra_info
record.features.append((name, from_res, to_res, description, ft_id))
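# For illustration, a feature table line such as (spacing approximate):
#
#   FT   CHAIN         1    123       Ribosomal protein L1.
#
# would be appended to record.features by the code above as:
#
#   ("CHAIN", 1, 123, "Ribosomal protein L1.", "")
#
# where the final element is the FTId, empty here because the line has no
# /FTId= field.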
if __name__ == "__main__":
print("Quick self test...")
example_filename = "../../Tests/SwissProt/sp008"
import os
if not os.path.isfile(example_filename):
print("Missing test file %s" % example_filename)
else:
# Try parsing it!
with open(example_filename) as handle:
records = parse(handle)
for record in records:
print(record.entry_name)
print(",".join(record.accessions))
print(record.keywords)
print(repr(record.organism))
print(record.sequence[:20] + "...")
|
updownlife/multipleK
|
dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/SwissProt/__init__.py
|
Python
|
gpl-2.0
| 22,123
|
[
"Biopython"
] |
f347542906bb44db0f66cc199aa2d7584622118e25954b5beb15aa7b3906d049
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from itertools import zip_longest, combinations
import json
import os
import warnings
import numpy as np
import tvm
from tvm import relay
from tvm import rpc
from tvm.contrib import graph_executor
from tvm.relay.op.contrib import arm_compute_lib
from tvm.contrib import utils
from tvm.autotvm.measure import request_remote
class Device:
"""
Configuration for Arm Compute Library tests.
Check tests/python/contrib/arm_compute_lib/ for the presence of a
test_config.json file. This file can be used to override the default
configuration here, which will attempt to run the Arm Compute Library
runtime tests locally if the runtime is available. Changing the
configuration will allow these runtime tests to be offloaded to a remote
Arm device via a tracker, for example.
Notes
-----
The test configuration will be loaded once when the class is created. If the configuration
changes between tests, any changes will not be picked up.
Parameters
----------
device : RPCSession
Allows tests to connect to and use remote device.
Attributes
----------
connection_type : str
Details the type of RPC connection to use. Options:
local - Use the local device,
tracker - Connect to a tracker to request a remote device,
remote - Connect to a remote device directly.
host : str
Specify IP address or hostname of remote target.
port : int
Specify port number of remote target.
target : str
The compilation target.
device_key : str
The device key of the remote target. Use when connecting to a remote device via a tracker.
cross_compile : str
Specify path to cross compiler to use when connecting a remote device from a non-arm platform.
"""
connection_type = "local"
host = "127.0.0.1"
port = 9090
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon"
device_key = ""
cross_compile = ""
def __init__(self):
"""Keep remote device for lifetime of object."""
self.device = self._get_remote()
@classmethod
def _get_remote(cls):
"""Get a remote (or local) device to use for testing."""
if cls.connection_type == "tracker":
device = request_remote(cls.device_key, cls.host, cls.port, timeout=1000)
elif cls.connection_type == "remote":
device = rpc.connect(cls.host, cls.port)
elif cls.connection_type == "local":
device = rpc.LocalSession()
else:
raise ValueError(
"connection_type in test_config.json should be one of: " "local, tracker, remote."
)
return device
@classmethod
def load(cls, file_name):
"""Load test config
Load the test configuration by looking for file_name relative
to the test_arm_compute_lib directory.
"""
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
config_file = os.path.join(location, file_name)
if not os.path.exists(config_file):
warnings.warn(
"Config file doesn't exist, resuming Arm Compute Library tests with default config."
)
return
with open(config_file, mode="r") as config:
test_config = json.load(config)
cls.connection_type = test_config["connection_type"]
cls.host = test_config["host"]
cls.port = test_config["port"]
cls.target = test_config["target"]
cls.device_key = test_config.get("device_key") or ""
cls.cross_compile = test_config.get("cross_compile") or ""
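# An example of the test_config.json layout that load() expects, based on
# the keys read above (all values are illustrative assumptions):
#
# {
#     "connection_type": "tracker",
#     "host": "127.0.0.1",
#     "port": 9190,
#     "target": "llvm -mtriple=aarch64-linux-gnu -mattr=+neon",
#     "device_key": "my-arm-board",
#     "cross_compile": "aarch64-linux-gnu-g++"
# }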
def get_cpu_op_count(mod):
"""Traverse graph counting ops offloaded to TVM."""
class Counter(tvm.relay.ExprVisitor):
def __init__(self):
super().__init__()
self.count = 0
def visit_call(self, call):
if isinstance(call.op, tvm.ir.Op):
self.count += 1
super().visit_call(call)
c = Counter()
c.visit(mod["main"])
return c.count
def skip_runtime_test():
"""Skip test if it requires the runtime and it's not present."""
# ACL codegen not present.
if not tvm.get_global_func("relay.ext.arm_compute_lib", True):
print("Skip because Arm Compute Library codegen is not available.")
return True
# Skip when running locally and the ACL runtime is not present.
# Note: Ensure that the device config has been loaded before this check
if (
    Device.connection_type == "local"
    and not arm_compute_lib.is_arm_compute_runtime_enabled()
):
print("Skip because runtime isn't present or a remote device isn't being used.")
return True
def skip_codegen_test():
"""Skip test if it requires the ACL codegen and it's not present."""
if not tvm.get_global_func("relay.ext.arm_compute_lib", True):
print("Skip because Arm Compute Library codegen is not available.")
return True
def build_module(mod, target, params=None, enable_acl=True, tvm_ops=0, acl_partitions=1):
"""Build module with option to build for ACL."""
if isinstance(mod, tvm.relay.expr.Call):
mod = tvm.IRModule.from_expr(mod)
with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
if enable_acl:
mod = arm_compute_lib.partition_for_arm_compute_lib(mod, params)
tvm_op_count = get_cpu_op_count(mod)
assert tvm_op_count == tvm_ops, "Got {} TVM operators, expected {}".format(
tvm_op_count, tvm_ops
)
partition_count = 0
for global_var in mod.get_global_vars():
if "arm_compute_lib" in global_var.name_hint:
partition_count += 1
assert (
acl_partitions == partition_count
), "Got {} Arm Compute Library partitions, expected {}".format(
partition_count, acl_partitions
)
relay.backend.te_compiler.get().clear()
return relay.build(mod, target=target, params=params)
def build_and_run(
mod,
inputs,
outputs,
params,
device,
enable_acl=True,
no_runs=1,
tvm_ops=0,
acl_partitions=1,
config=None,
):
"""Build and run the relay module."""
if config is None:
config = {}
try:
lib = build_module(mod, device.target, params, enable_acl, tvm_ops, acl_partitions)
except Exception as e:
err_msg = "The module could not be built.\n"
if config:
err_msg += f"The test failed with the following parameters: {config}\n"
err_msg += str(e)
raise Exception(err_msg)
lib = update_lib(lib, device.device, device.cross_compile)
gen_module = graph_executor.GraphModule(lib["default"](device.device.cpu(0)))
gen_module.set_input(**inputs)
out = []
for _ in range(no_runs):
gen_module.run()
out.append([gen_module.get_output(i) for i in range(outputs)])
return out
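# A usage sketch, given a Relay module `mod` (shapes and input names are
# illustrative assumptions):
#
#   device = Device()
#   data = np.random.uniform(size=(1, 3, 14, 14)).astype("float32")
#   outputs = build_and_run(mod, {"data": data}, outputs=1,
#                           params={}, device=device)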
def update_lib(lib, device, cross_compile):
"""Export the library to the remote/local device."""
lib_name = "mod.so"
temp = utils.tempdir()
lib_path = temp.relpath(lib_name)
if cross_compile:
lib.export_library(lib_path, cc=cross_compile)
else:
lib.export_library(lib_path)
device.upload(lib_path)
lib = device.load_module(lib_name)
return lib
def verify(answers, atol, rtol, verify_saturation=False, config=None):
"""Compare the array of answers. Each entry is a list of outputs."""
if config is None:
config = {}
if len(answers) < 2:
raise RuntimeError(f"No results to compare: expected at least two, found {len(answers)}")
for answer in zip_longest(*answers):
for outs in combinations(answer, 2):
try:
if verify_saturation:
assert (
np.count_nonzero(outs[0].numpy() == 255) < 0.25 * outs[0].numpy().size
), "Output is saturated: {}".format(outs[0])
assert (
np.count_nonzero(outs[0].numpy() == 0) < 0.25 * outs[0].numpy().size
), "Output is saturated: {}".format(outs[0])
tvm.testing.assert_allclose(outs[0].numpy(), outs[1].numpy(), rtol=rtol, atol=atol)
except AssertionError as e:
err_msg = "Results not within the acceptable tolerance.\n"
if config:
err_msg += f"The test failed with the following parameters: {config}\n"
err_msg += str(e)
raise AssertionError(err_msg)
def extract_acl_modules(module):
"""Get the ACL module(s) from llvm module."""
return list(
filter(lambda mod: mod.type_key == "arm_compute_lib", module.get_lib().imported_modules)
)
def verify_codegen(
module,
known_good_codegen,
num_acl_modules=1,
tvm_ops=0,
target="llvm -mtriple=aarch64-linux-gnu -mattr=+neon",
):
"""Check acl codegen against a known good output."""
module = build_module(module, target, tvm_ops=tvm_ops, acl_partitions=num_acl_modules)
acl_modules = extract_acl_modules(module)
assert len(acl_modules) == num_acl_modules, (
f"The number of Arm Compute Library modules produced ({len(acl_modules)}) does not "
f"match the expected value ({num_acl_modules})."
)
for mod in acl_modules:
source = mod.get_source("json")
codegen = json.loads(source)["nodes"]
# remove input and const names as these cannot be predetermined
for node in range(len(codegen)):
if codegen[node]["op"] == "input" or codegen[node]["op"] == "const":
codegen[node]["name"] = ""
codegen_str = json.dumps(codegen, sort_keys=True, indent=2)
known_good_codegen_str = json.dumps(known_good_codegen, sort_keys=True, indent=2)
assert codegen_str == known_good_codegen_str, (
f"The JSON produced by codegen does not match the expected result. \n"
f"Actual={codegen_str} \n"
f"Expected={known_good_codegen_str}"
)
|
dmlc/tvm
|
tests/python/contrib/test_arm_compute_lib/infrastructure.py
|
Python
|
apache-2.0
| 11,014
|
[
"VisIt"
] |
6e6d22253ba3959ac44b5ba8afc030553a83ea8c0cd75d4c246efa2bde1e23d3
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in C-contiguous style, with the last index varying the
fastest. The iterator can also be indexed using basic slicing or
advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator cannot be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print item
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index :
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews :
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index :
When the "multi_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value :
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the Numpy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
def luf(lamdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
it[0] = lamdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style do-while pattern.
For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order=None, subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an
object whose __array__ method returns an array, or any
(nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then
the type will be determined as the minimum type required
to hold the objects in the sequence. This argument can only
be used to 'upcast' the array. For downcasting, use the
.astype(t) method.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`dtype`, `order`, etc.).
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C', then the array
will be in C-contiguous order (last-index varies the fastest).
If order is 'F', then the returned array will be in
Fortran-contiguous order (first-index varies the fastest).
If order is 'A' (default), then the returned array may be
in any order (either C-, Fortran-contiguous, or even discontiguous),
unless a copy is required, in which case it will be C-contiguous.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, fill
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in C (row-major) or
Fortran (column-major) order in memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the given
shape, dtype, and order.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
.. versionadded:: 1.6.0
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
.. versionadded:: 1.6.0
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
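Examples
--------
A sketch of the low-level usage; for non-object dtypes `obj` must be a
string of exactly ``dtype.itemsize`` raw bytes (an explicit
little-endian dtype is used here to keep the result platform
independent):

>>> np.core.multiarray.scalar(np.dtype('<i4'), '\\x01\\x00\\x00\\x00')
1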
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'count_nonzero',
"""
count_nonzero(a)
Counts the number of non-zero values in the array ``a``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
Returns
-------
count : int or array of int
Number of non-zero values in the array.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> np.count_nonzero([[0,1,7,0,0],[3,0,0,2,19]])
5
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset; default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimensions of `a` and `b` have different sizes.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by pickle.
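Examples
--------
A minimal sketch of the call pickle makes internally:

>>> a = np.core.multiarray._reconstruct(np.ndarray, (0,), 'b')
>>> a.shape
(0,)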
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
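Examples
--------
A minimal sketch; passing ``None`` restores the default formatter:

>>> np.set_string_function(lambda a: 'array of %d elements' % a.size)
>>> np.arange(4)
array of 4 elements
>>> np.set_string_function(None)
>>> np.arange(4)
array([0, 1, 2, 3])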
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same
shape as `condition`.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
Find the indices of elements of `x` that are in `goodvalues`.
>>> goodvalues = [3, 4, 7]
>>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape)
>>> ix
array([[False, False, False],
[ True, True, False],
[False, True, False]], dtype=bool)
>>> np.where(ix)
(array([1, 1, 2]), array([0, 1, 1]))
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
can be converted to arrays of the same shape. If a 2D array is provided
for the keys argument, its rows are interpreted as the sorting keys and
sorting is according to the last row, second-last row, etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print ind
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
can_cast(from, totype, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
Starting in NumPy 1.9, the can_cast function returns False in 'safe'
casting mode for integer/float dtype and string dtype if the string dtype
length is not long enough to store the max integer/float value converted
to a string. Previously can_cast in 'safe' mode returned True for
integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the array's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
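Examples
--------
A minimal sketch (the returned object supports the Python 2 buffer
protocol; its contents are uninitialized):

>>> buf = np.newbuffer(10)
>>> len(buf)
10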
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it's the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
""")
add_newdoc('numpy.core', 'einsum',
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : data-type, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscript labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
""")
add_newdoc('numpy.core', 'vdot',
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major or column-major order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self.__array_interface__['data'][0]``.
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``_as_parameter_`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
Fortran-style contiguous arrays.
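Examples
--------
A minimal sketch of reading and setting flags:

>>> a = np.zeros((2, 3))
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable = False
>>> a.flags['WRITEABLE']
False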
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has Fortran order.
If order is 'Any' (None) then the result has Fortran order
only if the array already is in Fortran order.
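Examples
--------
A minimal sketch; ``copy.copy`` dispatches to this method, and the
array values are arbitrary:
>>> import copy
>>> a = np.array([1, 2, 3])
>>> b = copy.copy(a)
>>> b[0] = 9
>>> a
array([1, 2, 3])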
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isfortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, the astype method raises an error if the string
dtype to cast to is not long enough, in 'safe' casting mode, to hold the
maximum value of the integer/float array being cast. Previously the
casting was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
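A hedged illustration of the `casting` argument; the exact error text
may differ between NumPy versions:
>>> x.astype(np.int8, casting='safe')
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: Cannot cast array from dtype('float64') to dtype('int8') according to the rule 'safe'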
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements.
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped:
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
NumPy 1.10 the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
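Examples
--------
A minimal round-trip sketch using ``numpy.loads``, mentioned above:
>>> a = np.array([1, 2])
>>> s = a.dumps()
>>> np.loads(s)
array([1, 2])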
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A'}, optional
Whether to flatten in C (row-major), Fortran (column-major) order,
or preserve the C/Fortran ordering from `a`.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view cannot be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the imaginary part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible).
There must be at least 1 argument; the last argument is interpreted
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setasflat',
"""
a.setasflat(arr)
Equivalent to a.flat = arr.flat, but is generally more efficient.
This function does not check for overlap, so if ``arr`` and ``a``
are viewing the same data with different strides, the results will
be unpredictable.
Parameters
----------
arr : array_like
The array to copy into a.
Examples
--------
>>> a = np.arange(2*4).reshape(2,4)[:,:-1]; a
array([[0, 1, 2],
[4, 5, 6]])
>>> b = np.arange(3*3, dtype='f4').reshape(3,3).T[::-1,:-1]; b
array([[ 2., 5.],
[ 1., 4.],
[ 0., 3.]], dtype=float32)
>>> a.setasflat(b)
>>> a
array([[2, 5, 1],
[4, 0, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
Determine if two arrays can share memory.
The memory-bounds of a and b are computed. If they overlap then
this function returns True. Otherwise, it returns False.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Parameters
----------
a, b : ndarray
Returns
-------
out : bool
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
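Overlapping views of the same buffer may report True:
>>> x = np.arange(10)
>>> np.may_share_memory(x[1:], x[:-1])
True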
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
above. `new_order` codes can be any of::
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
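Examples
--------
A small sketch with an explicit little-endian input (the values are
arbitrary; note that the same bytes are reinterpreted, not swapped):
>>> A = np.array([1, 256], dtype='<i2')
>>> B = A.newbyteorder()
>>> B.dtype
dtype('>i2')
>>> B
array([256,   1], dtype=int16)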
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False)
Return the product of the array elements over the given axis.
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=None)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated. If `where` is
provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
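Examples
--------
A minimal sketch of the `where` argument (the values are arbitrary):
>>> dst = np.zeros(4)
>>> np.copyto(dst, [1, 2, 3, 4], where=[True, False, True, False])
>>> dst
array([ 1.,  0.,  3.,  0.])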
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data or references or views to it exist,
and the data memory must be changed.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in the kth position is in the position it would be in a sorted
array. All elements smaller than the kth element are moved before this
element and all equal or greater are moved behind it. The ordering of the
elements in the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
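Examples
--------
A minimal sketch; this writes ``data.bin`` in the current directory,
and the file name is arbitrary:
>>> a = np.arange(4, dtype=np.int32)
>>> a.tofile('data.bin')
>>> np.fromfile('data.bin', dtype=np.int32)
array([0, 1, 2, 3])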
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated, ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in 'C', 'Fortran', or
'Any' order (the default is 'C'-order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tobytes()
b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print type(y)
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print x
[(1, 20) (3, 4)]
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a Numpy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a Numpy universal function (``ufunc``) object.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array([012, 036, 0144], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['012', '036', '0144'],
dtype='|S4')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
...                     invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in Numpy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print "Floating point error (%s), with flag %s" % (type, flag)
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'digitize',
"""
digitize(x, bins, right=False)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right
is True, then the right bin is closed so that the index ``i`` is such
that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins`
is monotonically increasing or decreasing, respectively.
Parameters
----------
x : array_like
Input array to be binned. Prior to Numpy 1.10.0, this array had to
be 1-dimensional, but can now have any shape.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is closed in this
case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
.. versionadded:: 1.10.0
`np.digitize` is implemented in terms of `np.searchsorted`. This means
that a binary search is used to bin the values, which scales much better
for larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0, 5, 10, 15, 20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
""")
add_newdoc('numpy.core.multiarray', 'bincount',
"""
bincount(x, weights=None, minlength=None)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
.. versionadded:: 1.6.0
A minimum number of bins for the output array.
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is non-positive.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.core.multiarray', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
.. versionadded:: 1.6.0
Determines whether the indices should be viewed as indexing in
C (row-major) order or FORTRAN (column-major) order.
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, raise a RuntimeError.
If this routine does not know how to add a docstring to the object,
raise a TypeError.
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An integer type array whose elements should be packed to bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.core.multiarray', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
    >>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
        The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
        to the data-type of the output array if such is provided, or the
        data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
    ``range(len(a) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
      r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
`a[indices] += b`, except that results are accumulated for elements that
are indexed more than once. For example, `a[[0,0]] += 1` will only
increment the first element once because of buffering, whereas
`add.at(a, [0,0], 1)` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
    >>> a
    array([-1, -2,  3,  4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
    >>> a
    array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
    >>> a
    array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
Array-interface compliant full description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
    >>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the Numpy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
O object
S (byte-)string
U Unicode
V void
= ======================
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order
specifications below. The default value ('S') results in
swapping the current byte order.
`new_order` codes can be any of::
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
    A busdaycalendar object can be specified with any set of weekly
    valid days, plus an optional set of "holiday" dates that will
    always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'is_busday',
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
""")
add_newdoc('numpy.core.multiarray', 'busday_offset',
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
""")
add_newdoc('numpy.core.multiarray', 'busday_count',
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
""")
##############################################################################
#
# nd_grid instances
#
##############################################################################
add_newdoc('numpy.lib.index_tricks', 'mgrid',
"""
`nd_grid` instance which returns a dense multi-dimensional "meshgrid".
    An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
(or fleshed out) mesh-grid when indexed, so that each returned argument
has the same shape. The dimensions and number of the output arrays are
equal to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
    -------
mesh-grid `ndarrays` all of the same dimensions
See Also
--------
numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
""")
add_newdoc('numpy.lib.index_tricks', 'ogrid',
"""
`nd_grid` instance which returns an open multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
(i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
of each returned array is greater than 1. The dimension and number of the
output arrays are equal to the number of indexing dimensions. If the step
length is not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
    -------
mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
See Also
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
    consequent attributes being either "get-only" or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
    provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* 'S' - swap dtype from current to opposite endian
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for other scalar classes
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'bool_',
"""Numpy's Boolean type. Character code: ``?``. Alias: bool8""")
add_newdoc('numpy.core.numerictypes', 'complex64',
"""
Complex number type composed of two 32 bit floats. Character code: 'F'.
""")
add_newdoc('numpy.core.numerictypes', 'complex128',
"""
Complex number type composed of two 64 bit floats. Character code: 'D'.
Python complex compatible.
""")
add_newdoc('numpy.core.numerictypes', 'complex256',
"""
Complex number type composed of two 128-bit floats. Character code: 'G'.
""")
add_newdoc('numpy.core.numerictypes', 'float32',
"""
32-bit floating-point number. Character code 'f'. C float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float64',
"""
64-bit floating-point number. Character code 'd'. Python float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float96',
"""
96-bit extended-precision floating-point number. Character code: 'g'.
C long double compatible on platforms where long double is 96 bits wide.
""")
add_newdoc('numpy.core.numerictypes', 'float128',
"""
128-bit floating-point number. Character code: 'g'. C long double
compatible.
""")
add_newdoc('numpy.core.numerictypes', 'int8',
"""8-bit integer. Character code ``b``. C char compatible.""")
add_newdoc('numpy.core.numerictypes', 'int16',
"""16-bit integer. Character code ``h``. C short compatible.""")
add_newdoc('numpy.core.numerictypes', 'int32',
"""32-bit integer. Character code 'i'. C int compatible.""")
add_newdoc('numpy.core.numerictypes', 'int64',
"""64-bit integer. Character code 'l'. Python int compatible.""")
add_newdoc('numpy.core.numerictypes', 'object_',
"""Any Python object. Character code: 'O'.""")
|
dato-code/numpy
|
numpy/add_newdocs.py
|
Python
|
bsd-3-clause
| 219,023
|
[
"Brian"
] |
da5c332998479770f6285eaa1f18e61505706e84e7c26ebceb94e87012f52e19
|
# -*- coding: utf-8 -*-
# module pyparsing.py
#
# Copyright (c) 2003-2019 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and
executing simple grammars, vs. the traditional lex/yacc approach, or the
use of regular expressions. With pyparsing, you don't need to learn
a new syntax for defining grammars or matching expressions - the parsing
module provides a library of classes that you use to construct the
grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
``"<salutation>, <addressee>!"``), built up using :class:`Word`,
:class:`Literal`, and :class:`And` elements
(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
and the strings are auto-converted to :class:`Literal` expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the
self-explanatory class names, and the use of '+', '|' and '^' operators.
The :class:`ParseResults` object returned from
:class:`ParserElement.parseString` can be
accessed as a nested list, a dictionary, or an object with named
attributes.
The pyparsing module handles some of the problems that are typically
vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle
"Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes :class:`ParserElement` and :class:`ParseResults` to
see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from :class:`Literal` and
:class:`CaselessLiteral` classes
- construct character word-group expressions using the :class:`Word`
class
- see how to create repetitive expressions using :class:`ZeroOrMore`
and :class:`OneOrMore` classes
- use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
and :class:`'&'<Each>` operators to combine simple expressions into
more complex ones
- associate names with your parsed results using
:class:`ParserElement.setResultsName`
- access the parsed data, which is returned as a :class:`ParseResults`
object
- find some helpful expression short-cuts like :class:`delimitedList`
and :class:`oneOf`
- find more useful common expressions in the :class:`pyparsing_common`
namespace class
"""
__version__ = "2.4.7"
__versionTime__ = "30 Mar 2020 00:43 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
from operator import itemgetter
import itertools
from functools import wraps
from contextlib import contextmanager
try:
# Python 3
from itertools import filterfalse
except ImportError:
from itertools import ifilterfalse as filterfalse
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
# Python 3
from collections.abc import Iterable
from collections.abc import MutableMapping, Mapping
except ImportError:
# Python 2.7
from collections import Iterable
from collections import MutableMapping, Mapping
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
try:
from types import SimpleNamespace
except ImportError:
class SimpleNamespace: pass
# version compatibility configuration
__compat__ = SimpleNamespace()
__compat__.__doc__ = """
A cross-version compatibility configuration for pyparsing features that will be
released in a future version. By setting values in this configuration to True,
those features can be enabled in prior versions for compatibility development
and testing.
- collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
of results names when an And expression is nested within an Or or MatchFirst; set to
True to enable bugfix released in pyparsing 2.3.0, or False to preserve
pre-2.3.0 handling of named results
"""
__compat__.collect_all_And_tokens = True
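# A minimal sketch (hypothetical usage) of the compatibility flag above: code
# that still depends on pre-2.3.0 named-result grouping can opt out of the
# Issue #63 bugfix before defining its grammars.
#
#   import pyparsing as pp
#   pp.__compat__.collect_all_And_tokens = False  # restore pre-2.3.0 behavior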
__diag__ = SimpleNamespace()
__diag__.__doc__ = """
Diagnostic configuration (all default to False)
- warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results
name is defined on a MatchFirst or Or expression with one or more And subexpressions
(only warns if __compat__.collect_all_And_tokens is False)
- warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
name is defined on a containing expression with ungrouped subexpressions that also
have results names
- warn_name_set_on_empty_Forward - flag to enable warnings when a Forward is defined
with a results name, but has no contents defined
- warn_on_multiple_string_args_to_oneof - flag to enable warnings when oneOf is
incorrectly called with multiple str arguments
- enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
calls to ParserElement.setName()
"""
__diag__.warn_multiple_tokens_in_named_alternation = False
__diag__.warn_ungrouped_named_tokens_in_collection = False
__diag__.warn_name_set_on_empty_Forward = False
__diag__.warn_on_multiple_string_args_to_oneof = False
__diag__.enable_debug_on_named_expressions = False
__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")]
def _enable_all_warnings():
__diag__.warn_multiple_tokens_in_named_alternation = True
__diag__.warn_ungrouped_named_tokens_in_collection = True
__diag__.warn_name_set_on_empty_Forward = True
__diag__.warn_on_multiple_string_args_to_oneof = True
__diag__.enable_all_warnings = _enable_all_warnings
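# A minimal sketch (hypothetical usage) of the diagnostic switches above: each
# flag can be set individually, or all of them at once via the helper.
#
#   import pyparsing as pp
#   pp.__diag__.warn_name_set_on_empty_Forward = True  # a single flag
#   pp.__diag__.enable_all_warnings()                  # or everything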
__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__',
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',
'conditionAsParseAction', 're',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
unicode = str
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode
friendly. It first tries str(obj). If that fails with
a UnicodeEncodeError, it falls back to unicode(obj), encodes the
result with the default encoding (substituting XML character
references for characters that cannot be encoded), and rewrites
those references as Unicode escape sequences.
"""
if isinstance(obj, unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex(r'&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__, fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split())
for from_, to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
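# Illustrative behavior of _xml_escape; because the ampersand is replaced
# first, the '&' in the entities inserted afterwards is not re-escaped.
#
#   >>> _xml_escape('a < b & "c"')
#   'a &lt; b &amp; &quot;c&quot;'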
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
def conditionAsParseAction(fn, message=None, fatal=False):
msg = message if message is not None else "failed user-defined condition"
exc_type = ParseFatalException if fatal else ParseException
fn = _trim_arity(fn)
@wraps(fn)
def pa(s, l, t):
if not bool(fn(s, l, t)):
raise exc_type(s, l, msg)
return pa
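# A minimal sketch (assumed usage) of conditionAsParseAction: it wraps a
# boolean predicate so that a falsy result raises ParseException (or
# ParseFatalException when fatal=True) instead of silently passing tokens on.
#
#   is_even = conditionAsParseAction(lambda toks: int(toks[0]) % 2 == 0,
#                                    message="expected an even number")
#   # Word(nums).addParseAction(is_even).parseString("3") -> ParseException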
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(self, pstr, loc=0, msg=None, elem=None):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__(self, aname):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if aname == "lineno":
return lineno(self.loc, self.pstr)
elif aname in ("col", "column"):
return col(self.loc, self.pstr)
elif aname == "line":
return line(self.loc, self.pstr)
else:
raise AttributeError(aname)
def __str__(self):
if self.pstr:
if self.loc >= len(self.pstr):
foundstr = ', found end of text'
else:
foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\')
else:
foundstr = ''
return ("%s%s (at char %d), (line:%d, col:%d)" %
(self.msg, foundstr, self.loc, self.lineno, self.column))
def __repr__(self):
return _ustr(self)
def markInputline(self, markerString=">!<"):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
Exception thrown when a parse expression doesn't match the input;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
@staticmethod
def explain(exc, depth=16):
"""
Method to take an exception and translate the Python internal traceback into a list
of the pyparsing expressions that caused the exception to be raised.
Parameters:
- exc - exception raised during parsing (need not be a ParseException, in support
of Python exceptions that might be raised in a parse action)
- depth (default=16) - number of levels back in the stack trace to list expression
and function names; if None, the full stack trace names will be listed; if 0, only
the failing input line, marker, and exception string will be shown
Returns a multi-line string listing the ParserElements and/or function names in the
exception's stack trace.
Note: the diagnostic output will include string representations of the expressions
that failed to parse. These representations will be more helpful if you use `setName` to
give identifiable names to your expressions. Otherwise they will use the default string
forms, which may be cryptic to read.
explain() is only supported under Python 3.
"""
import inspect
if depth is None:
depth = sys.getrecursionlimit()
ret = []
if isinstance(exc, ParseBaseException):
ret.append(exc.line)
ret.append(' ' * (exc.col - 1) + '^')
ret.append("{0}: {1}".format(type(exc).__name__, exc))
if depth > 0:
callers = inspect.getinnerframes(exc.__traceback__, context=depth)
seen = set()
for i, ff in enumerate(callers[-depth:]):
frm = ff[0]
f_self = frm.f_locals.get('self', None)
if isinstance(f_self, ParserElement):
if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
continue
if f_self in seen:
continue
seen.add(f_self)
self_type = type(f_self)
ret.append("{0}.{1} - {2}".format(self_type.__module__,
self_type.__name__,
f_self))
elif f_self is not None:
self_type = type(f_self)
ret.append("{0}.{1}".format(self_type.__module__,
self_type.__name__))
else:
code = frm.f_code
if code.co_name in ('wrapper', '<module>'):
continue
ret.append("{0}".format(code.co_name))
depth -= 1
if not depth:
break
return '\n'.join(ret)
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like :class:`ParseFatalException`, but thrown internally
when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
that parsing is to stop immediately because an unbacktrackable
syntax error has been found.
"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by :class:`ParserElement.validate` if the
grammar could be improperly recursive
"""
def __init__(self, parseElementList):
self.parseElementTrace = parseElementList
def __str__(self):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self, p1, p2):
self.tup = (p1, p2)
def __getitem__(self, i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self, i):
self.tup = (self.tup[0], i)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to
the parsed data:
- as a list (``len(results)``)
- by list index (``results[0], results[1]``, etc.)
- by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name, int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])):
if isinstance(toklist, basestring):
toklist = [toklist]
if asList:
if isinstance(toklist, ParseResults):
self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError, TypeError, IndexError):
self[name] = toklist
def __getitem__(self, i):
if isinstance(i, (int, slice)):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([v[0] for v in self.__tokdict[i]])
def __setitem__(self, k, v, isinstance=isinstance):
if isinstance(v, _ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
sub = v[0]
elif isinstance(k, (int, slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]
sub = v
if isinstance(sub, ParseResults):
sub.__parent = wkref(self)
def __delitem__(self, i):
if isinstance(i, (int, slice)):
mylen = len(self.__toklist)
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i + 1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__(self, k):
return k in self.__tokdict
def __len__(self):
return len(self.__toklist)
def __bool__(self):
return (not not self.__toklist)
__nonzero__ = __bool__
def __iter__(self):
return iter(self.__toklist)
def __reversed__(self):
return iter(self.__toklist[::-1])
def _iterkeys(self):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues(self):
return (self[k] for k in self._iterkeys())
def _iteritems(self):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys."""
values = _itervalues
"""Returns an iterator of all named result values."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys(self):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values(self):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items(self):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys(self):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop(self, *args, **kwargs):
"""
Removes and returns item at specified index (default= ``last``).
Supports both ``list`` and ``dict`` semantics for ``pop()``. If
passed no argument or an integer argument, it will use ``list``
semantics and pop tokens from the list of parsed tokens. If passed
a non-integer argument (most likely a string), it will use ``dict``
semantics and pop the corresponding value from any defined results
names. A second default return value argument is supported, just as in
``dict.pop()``.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k, v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int)
or len(args) == 1
or args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given ``defaultValue`` or ``None`` if no
``defaultValue`` is specified.
Similar to ``dict.get()``.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert(self, index, insStr):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to ``list.insert()``.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name, occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append(self, item):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend(self, itemseq):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self.__iadd__(itemseq)
else:
self.__toklist.extend(itemseq)
def clear(self):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__(self, name):
try:
return self[name]
except KeyError:
return ""
def __add__(self, other):
ret = self.copy()
ret += other
return ret
def __iadd__(self, other):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a < 0 else a + offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
for k, vlist in otheritems for v in vlist]
for k, v in otherdictitems:
self[k] = v
if isinstance(v[0], ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update(other.__accumNames)
return self
def __radd__(self, other):
if isinstance(other, int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__(self):
return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))
def __str__(self):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList(self, sep=''):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance(item, ParseResults):
out += item._asStringList()
else:
out.append(_ustr(item))
return out
def asList(self):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]
def asDict(self):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k, toItem(v)) for k, v in item_fn())
def copy(self):
"""
Returns a new copy of a :class:`ParseResults` object.
"""
ret = ParseResults(self.__toklist)
ret.__tokdict = dict(self.__tokdict.items())
ret.__parent = self.__parent
ret.__accumNames.update(self.__accumNames)
ret.__name = self.__name
return ret
def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [nl, indent, "<", selfTag, ">"]
for i, res in enumerate(self.__toklist):
if isinstance(res, ParseResults):
if i in namedItems:
out += [res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">"]
out += [nl, indent, "</", selfTag, ">"]
return "".join(out)
def __lookup(self, sub):
for k, vlist in self.__tokdict.items():
for v, loc in vlist:
if sub is v:
return k
return None
def getName(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1
and len(self.__tokdict) == 1
and next(iter(self.__tokdict.values()))[0][1] in (0, -1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', full=True, include_list=True, _depth=0):
"""
Diagnostic method for listing out the contents of
a :class:`ParseResults`. Accepts an optional ``indent`` argument so
that this string can be embedded in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
if include_list:
out.append(indent + _ustr(self.asList()))
else:
out.append('')
if full:
if self.haskeys():
items = sorted((str(k), v) for k, v in self.items())
for k, v in items:
if out:
out.append(NL)
out.append("%s%s- %s: " % (indent, (' ' * _depth), k))
if isinstance(v, ParseResults):
if v:
out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1))
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv, ParseResults) for vv in self):
v = self
for i, vv in enumerate(v):
if isinstance(vv, ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
(' ' * (_depth)),
i,
indent,
(' ' * (_depth + 1)),
vv.dump(indent=indent,
full=full,
include_list=include_list,
_depth=_depth + 1)))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
(' ' * (_depth)),
i,
indent,
(' ' * (_depth + 1)),
_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the
`pprint <https://docs.python.org/3/library/pprint.html>`_ module.
Accepts additional positional or keyword args as defined for
`pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return (self.__toklist,
(self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name))
def __setstate__(self, state):
self.__toklist = state[0]
self.__tokdict, par, inAccumNames, self.__name = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return dir(type(self)) + list(self.keys())
@classmethod
def from_dict(cls, other, name=None):
"""
Helper classmethod to construct a ParseResults from a dict, preserving the
name-value relations as results names. If an optional 'name' argument is
given, a nested ParseResults will be returned
"""
def is_iterable(obj):
try:
iter(obj)
except Exception:
return False
else:
if PY_3:
return not isinstance(obj, (str, bytes))
else:
return not isinstance(obj, basestring)
ret = cls([])
for k, v in other.items():
if isinstance(v, Mapping):
ret += cls.from_dict(v, name=k)
else:
ret += cls([v], name=k, asList=is_iterable(v))
if name is not None:
ret = cls([ret], name=name)
return ret
MutableMapping.register(ParseResults)
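# A minimal sketch (assumed interactive session) of ParseResults.from_dict,
# which builds named results directly from a mapping:
#
#   >>> pr = ParseResults.from_dict({"year": "1999", "month": "12"})
#   >>> pr["year"], pr.month
#   ('1999', '12')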
def col(loc, strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See
:class:`ParserElement.parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
"""
s = strg
return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc, strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note - the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`ParserElement.parseString`
for more information on parsing strings containing ``<TAB>`` s, and
suggested methods to maintain a consistent view of the parsed string, the
parse location, and line and column positions within the parsed string.
"""
return strg.count("\n", 0, loc) + 1
def line(loc, strg):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR + 1:nextCR]
else:
return strg[lastCR + 1:]
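# Illustrative behavior (assumed interactive session) of the position helpers
# above, with loc pointing at the 'd' that starts the second line:
#
#   >>> s = "abc\ndef"
#   >>> lineno(4, s), col(4, s), line(4, s)
#   (2, 1, 'def')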
def _defaultStartDebugAction(instring, loc, expr):
print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring))))
def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction(instring, loc, expr, exc):
print("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
# decorator to trim function calls to match the arity of the target
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s, l, t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3, 5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3, 5, 0) else -2
frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
return [frame_summary[:2]]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
try:
del tb
except NameError:
pass
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
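# A minimal sketch (assumed usage) of _trim_arity: parse actions written with
# any of the supported arities are normalized to the full (s, loc, toks)
# calling convention used internally.
#
#   fn0 = _trim_arity(lambda: "hit")         # zero-argument action
#   fn1 = _trim_arity(lambda toks: toks[0])  # one-argument action
#   # both may now be invoked as fn(instring, loc, tokens)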
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars(chars):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
@classmethod
def _trim_traceback(cls, tb):
while tb.tb_next:
tb = tb.tb_next
return tb
def __init__(self, savelist=False):
self.parseAction = list()
self.failAction = None
# ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = (None, None, None) # custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy(self):
"""
Make a copy of this :class:`ParserElement`. Useful for defining
different parse actions for the same parsing pattern, using copies of
the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of ``expr.copy()`` is just ``expr()``::
integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
"""
cpy = copy.copy(self)
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName(self, name):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if __diag__.enable_debug_on_named_expressions:
self.setDebug()
return self
def setResultsName(self, name, listAllMatches=False):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original :class:`ParserElement` object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
``expr("name")`` in place of ``expr.setResultsName("name")``
- see :class:`__call__`.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
return self._setResultsName(name, listAllMatches)
def _setResultsName(self, name, listAllMatches=False):
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches = True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self, breakFlag=True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set ``breakFlag`` to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
# this call to pdb.set_trace() is intentional, not a checkin error
pdb.set_trace()
return _parseMethod(instring, loc, doActions, callPreParse)
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse, "_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction(self, *fns, **kwargs):
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as ``fn(s, loc, toks)`` ,
``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
If None is passed as the parse action, all previously added parse actions for this
expression are cleared.
Optional keyword arguments:
- callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
if list(fns) == [None,]:
self.parseAction = []
else:
if not all(callable(fn) for fn in fns):
raise TypeError("parse actions must be callable")
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction(self, *fns, **kwargs):
"""
Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
See examples in :class:`copy`.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
:class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
functions passed to ``addCondition`` need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
for fn in fns:
self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),
fatal=kwargs.get('fatal', False)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction(self, fn):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
``fn(s, loc, expr, err)`` where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw :class:`ParseFatalException`
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables(self, instring, loc):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc, dummy = e._parse(instring, loc)
exprsFound = True
except ParseException:
pass
return loc
def preParse(self, instring, loc):
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
return loc, []
def postParse(self, instring, loc, tokenlist):
return tokenlist
# ~ @profile
def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
TRY, MATCH, FAIL = 0, 1, 2
debugging = (self.debug) # and doActions)
if debugging or self.failAction:
# ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))
if self.debugActions[TRY]:
self.debugActions[TRY](instring, loc, self)
try:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or preloc >= len(instring):
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except Exception as err:
# ~ print ("Exception raised:", err)
if self.debugActions[FAIL]:
self.debugActions[FAIL](instring, tokensStart, self, err)
if self.failAction:
self.failAction(instring, tokensStart, self, err)
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse(instring, loc)
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or preloc >= len(instring):
try:
loc, tokens = self.parseImpl(instring, preloc, doActions)
except IndexError:
raise ParseException(instring, len(instring), self.errmsg, self)
else:
loc, tokens = self.parseImpl(instring, preloc, doActions)
tokens = self.postParse(instring, loc, tokens)
retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
try:
tokens = fn(instring, tokensStart, retTokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
exc.__cause__ = parse_action_exc
raise exc
if tokens is not None and tokens is not retTokens:
retTokens = ParseResults(tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults)
except Exception as err:
# ~ print "Exception raised in user parse action:", err
if self.debugActions[FAIL]:
self.debugActions[FAIL](instring, tokensStart, self, err)
raise
else:
for fn in self.parseAction:
try:
tokens = fn(instring, tokensStart, retTokens)
except IndexError as parse_action_exc:
exc = ParseException("exception raised in parse action")
exc.__cause__ = parse_action_exc
raise exc
if tokens is not None and tokens is not retTokens:
retTokens = ParseResults(tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
modal=self.modalResults)
if debugging:
# ~ print ("Matched", self, "->", retTokens.asList())
if self.debugActions[MATCH]:
self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)
return loc, retTokens
def tryParse(self, instring, loc):
try:
return self._parse(instring, loc, doActions=False)[0]
except ParseFatalException:
raise ParseException(instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
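    # Design note: get/set/clear close over the local ``cache`` dict and the
    # ``not_in_cache`` sentinel, and are bound with types.MethodType; this
    # avoids per-call instance-attribute lookups on the hot parsing path.
    # The _FifoCache variants below follow the same pattern.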
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(cache) > size:
try:
cache.popitem(False)
except KeyError:
pass
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(key_fifo) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return value[0], value[1].copy()
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
        instead of re-executing parsing/validating code. Memoizing covers
        both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default= ``128``) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method :class:`ParserElement.enablePackrat`.
For best results, call ``enablePackrat()`` immediately after
importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
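    # Illustrative cache-size choices (a sketch of the options the docstring
    # above describes, not library-mandated values):
    #
    #   ParserElement.enablePackrat()        # default FIFO cache of 128 entries
    #   ParserElement.enablePackrat(None)    # unbounded cache - fastest, most memory
    #   ParserElement.enablePackrat(0)       # cache effectively disabled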
def parseString(self, instring, parseAll=False):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
Returns the parsed data as a :class:`ParseResults` object, which may be
accessed as a list, or as a dict or object with attributes if the given parser
includes results names.
If you want the grammar to require that the entire input string be
successfully parsed, then set ``parseAll`` to True (equivalent to ending
the grammar with ``StringEnd()``).
Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the ``loc`` argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling ``parseWithTabs`` on your grammar before calling ``parseString``
(see :class:`parseWithTabs`)
- define your parse action using the full ``(s, loc, toks)`` signature, and
reference the input string using the parse action's ``s`` argument
        - explicitly expand the tabs in your input string before calling
``parseString``
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
# ~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse(instring, 0)
if parseAll:
loc = self.preParse(instring, loc)
se = Empty() + StringEnd()
se._parse(instring, loc)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, '__traceback__', None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
else:
return tokens
def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
``maxMatches`` argument, to clip scanning after 'n' matches are found. If
``overlap`` is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See :class:`parseString` for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens, start, end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn(instring, loc)
nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
except ParseException:
loc = preloc + 1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn(instring, loc)
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc + 1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, '__traceback__', None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
def transformString(self, instring):
"""
Extension to :class:`scanString`, to modify matching text with modified tokens that may
be returned from a parse action. To use ``transformString``, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking ``transformString()`` on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. ``transformString()`` returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t, s, e in self.scanString(instring):
out.append(instring[lastE:s])
if t:
if isinstance(t, ParseResults):
out += t.asList()
elif isinstance(t, list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr, _flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, '__traceback__', None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
def searchString(self, instring, maxMatches=_MAX_INT):
"""
Another extension to :class:`scanString`, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
``maxMatches`` argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, '__traceback__', None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional ``maxsplit`` argument, to limit the number of splits;
        and the optional ``includeSeparators`` argument (default= ``False``), indicating whether
        the separating matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t, s, e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other):
"""
Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
converts them to :class:`Literal`s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
prints::
Hello, World! -> ['Hello', ',', 'World', '!']
``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
Literal('start') + ... + Literal('end')
is equivalent to:
Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
Note that the skipped text is returned with '_skipped' as a results name,
and to support having multiple skips in the same parser, the value returned is
a list of all skipped text.
"""
if other is Ellipsis:
return _PendingSkip(self)
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And([self, other])
def __radd__(self, other):
"""
Implementation of + operator when left operand is not a :class:`ParserElement`
"""
if other is Ellipsis:
return SkipTo(self)("_skipped*") + self
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns :class:`And` with error stop
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return self + And._ErrorStop() + other
def __rsub__(self, other):
"""
Implementation of - operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self, other):
"""
Implementation of * operator, allows use of ``expr * 3`` in place of
        ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
may also include ``None`` as in:
- ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
- ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
Note that ``expr*(None, n)`` does not raise an exception if
more than n exprs exist in the input stream; that is,
``expr*(None, n)`` does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
``expr*(None, n) + ~expr``
"""
if other is Ellipsis:
other = (0, None)
elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
other = ((0, ) + other[1:] + (None,))[:2]
if isinstance(other, int):
minElements, optElements = other, 0
elif isinstance(other, tuple):
other = tuple(o if o is not Ellipsis else None for o in other)
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0], int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self * other[0] + ZeroOrMore(self)
elif isinstance(other[0], int) and isinstance(other[1], int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0, 0)")
if optElements:
def makeOptionalList(n):
if n > 1:
return Optional(self + makeOptionalList(n - 1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self] * minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self] * minElements)
return ret
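    # A minimal sketch of repetition by multiplication (values illustrative):
    #
    #   ab = Literal("ab")
    #   (ab * 3).parseString("ababab")          # exactly 3 -> ['ab', 'ab', 'ab']
    #   (ab * (1, 2)).parseString("abab")       # between 1 and 2 repetitions
    #   (ab * (2, None)).parseString("ababab")  # at least 2, then ZeroOrMore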
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other):
"""
Implementation of | operator - returns :class:`MatchFirst`
"""
if other is Ellipsis:
return _PendingSkip(self, must_skip=True)
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst([self, other])
def __ror__(self, other):
"""
Implementation of | operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other):
"""
Implementation of ^ operator - returns :class:`Or`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or([self, other])
def __rxor__(self, other):
"""
Implementation of ^ operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other):
"""
Implementation of & operator - returns :class:`Each`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each([self, other])
def __rand__(self, other):
"""
Implementation of & operator when left operand is not a :class:`ParserElement`
"""
if isinstance(other, basestring):
other = self._literalStringClass(other)
if not isinstance(other, ParserElement):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__(self):
"""
Implementation of ~ operator - returns :class:`NotAny`
"""
return NotAny(self)
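    # Sketch of ~ as a negative lookahead (identifier names are illustrative):
    # match a word only when it is not a reserved keyword.
    #
    #   keyword = Keyword("if") | Keyword("else")
    #   identifier = ~keyword + Word(alphas)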
def __iter__(self):
# must implement __iter__ to override legacy use of sequential access to __getitem__ to
# iterate over a sequence
raise TypeError('%r object is not iterable' % self.__class__.__name__)
def __getitem__(self, key):
"""
use ``[]`` indexing notation as a short form for expression repetition:
- ``expr[n]`` is equivalent to ``expr*n``
- ``expr[m, n]`` is equivalent to ``expr*(m, n)``
- ``expr[n, ...]`` or ``expr[n,]`` is equivalent
to ``expr*n + ZeroOrMore(expr)``
(read as "at least n instances of ``expr``")
- ``expr[..., n]`` is equivalent to ``expr*(0, n)``
(read as "0 to n instances of ``expr``")
- ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
- ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
``None`` may be used in place of ``...``.
Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception
if more than ``n`` ``expr``s exist in the input stream. If this behavior is
desired, then write ``expr[..., n] + ~expr``.
"""
# convert single arg keys to tuples
try:
if isinstance(key, str):
key = (key,)
iter(key)
except TypeError:
key = (key, key)
if len(key) > 2:
warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5],
'... [{0}]'.format(len(key))
if len(key) > 5 else ''))
# clip to 2 elements
ret = self * tuple(key[:2])
return ret
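    # The [] shorthand in action (a sketch; values are illustrative):
    #
    #   Word(nums)[2]        # same as Word(nums) * 2
    #   Word(nums)[1, ...]   # same as OneOrMore(Word(nums))
    #   Word(nums)[..., 3]   # up to 3 occurrences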
def __call__(self, name=None):
"""
Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.
If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be
passed as ``True``.
        If ``name`` is omitted, same as calling :class:`copy`.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
"""
if name is not None:
return self._setResultsName(name)
else:
return self.copy()
def suppress(self):
"""
Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress(self)
def leaveWhitespace(self):
"""
Disables the skipping of whitespace before matching the characters in the
:class:`ParserElement`'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars(self, chars):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
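    # Sketch (illustrative): skip only spaces and tabs, so that newlines
    # become significant to the grammar.
    #
    #   value = Word(alphas).setWhitespaceChars(" \t")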
def parseWithTabs(self):
"""
Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.
Must be called before ``parseString`` when the input grammar contains elements that
match ``<TAB>`` characters.
"""
self.keepTabs = True
return self
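    # Sketch (illustrative): keep <TAB>s intact so they can be matched
    # explicitly rather than expanded to spaces.
    #
    #   tabbed = Literal("a") + White("\t") + Literal("b")
    #   tabbed.parseWithTabs().parseString("a\tb")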
def ignore(self, other):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append(Suppress(other.copy()))
return self
def setDebugActions(self, startAction, successAction, exceptionAction):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug(self, flag=True):
"""
Enable display of debugging messages while doing pattern matching.
Set ``flag`` to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using :class:`setDebugActions`. Prior to attempting
to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.
"""
if flag:
self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
else:
self.debug = False
return self
def __str__(self):
return self.name
def __repr__(self):
return _ustr(self)
def streamline(self):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion(self, parseElementList):
pass
def validate(self, validateTrace=None):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion([])
def parseFile(self, file_or_filename, parseAll=False):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clearing out pyparsing internal stack trace
if getattr(exc, '__traceback__', None) is not None:
exc.__traceback__ = self._trim_traceback(exc.__traceback__)
raise exc
def __eq__(self, other):
if self is other:
return True
elif isinstance(other, basestring):
return self.matches(other)
elif isinstance(other, ParserElement):
return vars(self) == vars(other)
return False
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return id(self)
def __req__(self, other):
return self == other
def __rne__(self, other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
        inline microtests of sub expressions while building up a larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#',
fullDump=True, printResults=True, failureTests=False, postParse=None,
file=None):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
- comment - (default= ``'#'``) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default= ``True``) prints test output to stdout
- failureTests - (default= ``False``) indicates if these tests are expected to fail parsing
- postParse - (default= ``None``) optional callback for successful parse results; called as
`fn(test_string, parse_results)` and returns a string to be added to the test output
- file - (default=``None``) optional file-like object to which test output will be written;
if None, will default to ``sys.stdout``
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if ``failureTests`` is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
if file is None:
file = sys.stdout
print_ = file.write
allResults = []
comments = []
success = True
NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString)
BOM = u'\ufeff'
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n' + '\n'.join(comments) if comments else '', t]
comments = []
try:
# convert newline marks to actual newlines, and strip leading BOM if present
t = NL.transformString(t.lstrip(BOM))
result = self.parseString(t, parseAll=parseAll)
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal)
else:
out.append(' ' * pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
else:
success = success and not failureTests
if postParse is not None:
try:
pp_value = postParse(t, result)
if pp_value is not None:
if isinstance(pp_value, ParseResults):
out.append(pp_value.dump())
else:
out.append(str(pp_value))
else:
out.append(result.dump())
except Exception as e:
out.append(result.dump(full=fullDump))
out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))
else:
out.append(result.dump(full=fullDump))
if printResults:
if fullDump:
out.append('')
print_('\n'.join(out))
allResults.append((t, result))
return success, allResults
class _PendingSkip(ParserElement):
    # internal placeholder class to hold a place where '...' is added to a parser element,
# once another ParserElement is added, this placeholder will be replaced with a SkipTo
def __init__(self, expr, must_skip=False):
super(_PendingSkip, self).__init__()
self.strRepr = str(expr + Empty()).replace('Empty', '...')
self.name = self.strRepr
self.anchor = expr
self.must_skip = must_skip
def __add__(self, other):
skipper = SkipTo(other).setName("...")("_skipped*")
if self.must_skip:
def must_skip(t):
if not t._skipped or t._skipped.asList() == ['']:
del t[0]
t.pop("_skipped", None)
def show_skip(t):
if t._skipped.asList()[-1:] == ['']:
skipped = t.pop('_skipped')
t['_skipped'] = 'missing <' + repr(self.anchor) + '>'
return (self.anchor + skipper().addParseAction(must_skip)
| skipper().addParseAction(show_skip)) + other
return self.anchor + skipper + other
def __repr__(self):
return self.strRepr
def parseImpl(self, *args):
raise Exception("use of `...` expression without following SkipTo target expression")
class Token(ParserElement):
"""Abstract :class:`ParserElement` subclass, for defining atomic
matching patterns.
"""
def __init__(self):
super(Token, self).__init__(savelist=False)
class Empty(Token):
"""An empty token, will always match.
"""
def __init__(self):
super(Empty, self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match.
"""
def __init__(self):
super(NoMatch, self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, doActions=True):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use :class:`CaselessLiteral`.
For keyword matching (force word break before and after the matched string),
use :class:`Keyword` or :class:`CaselessKeyword`.
"""
def __init__(self, matchString):
super(Literal, self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: modify __class__ to select
# a parseImpl optimized for single-character check
if self.matchLen == 1 and type(self) is Literal:
self.__class__ = _SingleCharLiteral
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class _SingleCharLiteral(Literal):
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] == self.firstMatchChar:
return loc + 1, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is,
it must be immediately followed by a non-keyword character. Compare
with :class:`Literal`:
- ``Literal("if")`` will match the leading ``'if'`` in
``'ifAndOnlyIf'``.
- ``Keyword("if")`` will not; it will only match the leading
``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
Accepts two optional constructor arguments in addition to the
keyword string:
- ``identChars`` is a string of characters that would be valid
identifier characters, defaulting to all alphanumerics + "_" and
"$"
- ``caseless`` allows case-insensitive matching, default is ``False``.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use :class:`CaselessKeyword`.
"""
DEFAULT_KEYWORD_CHARS = alphanums + "_$"
def __init__(self, matchString, identChars=None, caseless=False):
super(Keyword, self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl(self, instring, loc, doActions=True):
if self.caseless:
if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch)
and (loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen].upper() not in self.identChars)
and (loc == 0
or instring[loc - 1].upper() not in self.identChars)):
return loc + self.matchLen, self.match
else:
if instring[loc] == self.firstMatchChar:
if ((self.matchLen == 1 or instring.startswith(self.match, loc))
and (loc >= len(instring) - self.matchLen
or instring[loc + self.matchLen] not in self.identChars)
and (loc == 0 or instring[loc - 1] not in self.identChars)):
return loc + self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword, self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars(chars):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
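    # Sketch (illustrative values): with '-' added to the default keyword
    # characters, Keyword("no") no longer matches inside "no-cache".
    #
    #   Keyword.setDefaultKeywordChars(alphanums + "_$-")
    #   Keyword("no").parseString("no-cache")   # now raises ParseException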
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for :class:`CaselessKeyword`.)
"""
def __init__(self, matchString):
super(CaselessLiteral, self).__init__(matchString.upper())
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl(self, instring, loc, doActions=True):
if instring[loc:loc + self.matchLen].upper() == self.match:
return loc + self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of :class:`Keyword`.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for :class:`CaselessLiteral`.)
"""
def __init__(self, matchString, identChars=None):
super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
class CloseMatch(Token):
"""A variation on :class:`Literal` which matches "close" matches,
that is, strings with at most 'n' mismatching characters.
:class:`CloseMatch` takes parameters:
- ``match_string`` - string to be matched
- ``maxMismatches`` - (``default=1``) maximum number of
mismatches allowed to count as a match
The results from a successful parse will contain the matched text
from the input string and the following named results:
- ``mismatches`` - a list of the positions within the
match_string where mismatches were found
- ``original`` - the original match_string used to compare
against the input string
If ``mismatches`` is an empty list, then the match was an exact
match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch, self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl(self, instring, loc, doActions=True):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)):
src, mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters, an
optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction. An optional ``excludeChars`` parameter can
list characters that might be found in the input ``bodyChars``
string; useful to define a word of all printables except for one or
two characters, for instance.
:class:`srange` is useful for defining custom character set strings
for defining ``Word`` expressions, using range notation from
regular expression character sets.
A common mistake is to use :class:`Word` to match a specific literal
string, as in ``Word("Address")``. Remember that :class:`Word`
uses the string argument to define *sets* of matchable characters.
This expression would match "Add", "AAA", "dAred", or any other word
made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
exact literal string, use :class:`Literal` or :class:`Keyword`.
pyparsing includes helper strings for building Words:
- :class:`alphas`
- :class:`nums`
- :class:`alphanums`
- :class:`hexnums`
- :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
- accented, tilded, umlauted, etc.)
- :class:`punc8bit` (non-alphabetic characters in ASCII range
128-255 - currency, symbols, superscripts, diacriticals, etc.)
- :class:`printables` (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums + '-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None):
super(Word, self).__init__()
if excludeChars:
excludeChars = set(excludeChars)
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars:
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b" + self.reString + r"\b"
try:
self.re = re.compile(self.reString)
except Exception:
self.re = None
else:
self.re_match = self.re.match
self.__class__ = _WordRegex
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
elif self.asKeyword:
if (start > 0 and instring[start - 1] in bodychars
or loc < instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(Word, self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s) > 4:
return s[:4] + "..."
else:
return s
if self.initCharsOrig != self.bodyCharsOrig:
self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class _WordRegex(Word):
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
class Char(_WordRegex):
"""A short-cut class for defining ``Word(characters, exact=1)``,
when defining a match of any single character in a string of
characters.
"""
def __init__(self, charset, asKeyword=False, excludeChars=None):
super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)
self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars))
if asKeyword:
self.reString = r"\b%s\b" % self.reString
self.re = re.compile(self.reString)
self.re_match = self.re.match
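# Sketch of Char vs Word (illustrative): Char matches exactly one character
# from the set, where Word would greedily match a run.
#
#   Char(nums).parseString("123")   # -> ['1']
#   Word(nums).parseString("123")   # -> ['123']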
class Regex(Token):
r"""Token for matching strings that match a given regular
expression. Defined with string specifying the regular expression in
a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
If the given regex contains named groups (defined using ``(?P<name>...)``),
these will be preserved as named parse results.
If instead of the Python stdlib re module you wish to use a different RE module
    (such as the `regex` module), you can do so by building your
    Regex object with a compiled RE that was compiled using regex:
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
# use regex module instead of stdlib re module to construct a Regex using
# a compiled regular expression
import regex
parser = pp.Regex(regex.compile(r'[0-9]'))
"""
def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):
"""The parameters ``pattern`` and ``flags`` are passed
to the ``re.compile()`` function as-is. See the Python
`re module <https://docs.python.org/3/library/re.html>`_ module for an
explanation of the acceptable patterns and flags.
"""
super(Regex, self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'):
self.re = pattern
self.pattern = self.reString = pattern.pattern
self.flags = flags
else:
raise TypeError("Regex may only be constructed with a string or a compiled RE object")
self.re_match = self.re.match
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = self.re_match("") is not None
self.asGroupList = asGroupList
self.asMatch = asMatch
if self.asGroupList:
self.parseImpl = self.parseImplAsGroupList
if self.asMatch:
self.parseImpl = self.parseImplAsMatch
def parseImpl(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = ParseResults(result.group())
d = result.groupdict()
if d:
for k, v in d.items():
ret[k] = v
return loc, ret
def parseImplAsGroupList(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.groups()
return loc, ret
def parseImplAsMatch(self, instring, loc, doActions=True):
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result
return loc, ret
def __str__(self):
try:
return super(Regex, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
def sub(self, repl):
r"""
Return Regex with an attached parse action to transform the parsed
result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
Example::
make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
print(make_html.transformString("h1:main title:"))
# prints "<h1>main title</h1>"
"""
if self.asGroupList:
warnings.warn("cannot use sub() with Regex(asGroupList=True)",
SyntaxWarning, stacklevel=2)
raise SyntaxError()
if self.asMatch and callable(repl):
warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",
SyntaxWarning, stacklevel=2)
raise SyntaxError()
if self.asMatch:
def pa(tokens):
return tokens[0].expand(repl)
else:
def pa(tokens):
return self.re.sub(repl, tokens[0])
return self.addParseAction(pa)
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the
quote delimiting string
- escChar - character to escape quotes, typically backslash
(default= ``None``)
- escQuote - special quote sequence to escape an embedded quote
string (such as SQL's ``""`` to escape an embedded ``"``)
(default= ``None``)
- multiline - boolean indicating whether quotes can span
multiple lines (default= ``False``)
- unquoteResults - boolean indicating whether the matched text
should be unquoted (default= ``True``)
- endQuoteChar - string of one or more characters defining the
end of the quote delimited string (default= ``None`` => same as
quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace
(``'\t'``, ``'\n'``, etc.) to actual whitespace
(default= ``True``)
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False,
unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString, self).__init__()
        # remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ''))
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or ''))
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')')
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
self.re_match = self.re.match
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen: -self.endQuoteCharLen]
if isinstance(ret, basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t': '\t',
r'\n': '\n',
r'\f': '\f',
r'\r': '\r',
}
for wslit, wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__(self):
try:
return super(QuotedString, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given
set (will include whitespace in matched characters if not listed in
the provided exclusion set - see example). Defined with string
containing all disallowed characters, and an optional minimum,
maximum, and/or exact length. The default value for ``min`` is
1 (a minimum value < 1 is not valid); the default values for
``max`` and ``exact`` are 0, meaning no maximum or exact
length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__(self, notChars, min=1, max=0, exact=0):
super(CharsNotIn, self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use "
"Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = (self.minLen == 0)
self.mayIndexError = False
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min(start + self.maxLen, len(instring))
while loc < maxlen and instring[loc] not in notchars:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__(self):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally,
whitespace is ignored by pyparsing grammars. This class is included
when some whitespace structures are significant. Define with
a string containing the whitespace characters to be matched; default
is ``" \\t\\r\\n"``. Also takes optional ``min``,
``max``, and ``exact`` arguments, as defined for the
:class:`Word` class.
"""
whiteStrs = {
' ' : '<SP>',
'\t': '<TAB>',
'\n': '<LF>',
'\r': '<CR>',
'\f': '<FF>',
u'\u00A0': '<NBSP>',
u'\u1680': '<OGHAM_SPACE_MARK>',
u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',
u'\u2000': '<EN_QUAD>',
u'\u2001': '<EM_QUAD>',
u'\u2002': '<EN_SPACE>',
u'\u2003': '<EM_SPACE>',
u'\u2004': '<THREE-PER-EM_SPACE>',
u'\u2005': '<FOUR-PER-EM_SPACE>',
u'\u2006': '<SIX-PER-EM_SPACE>',
u'\u2007': '<FIGURE_SPACE>',
u'\u2008': '<PUNCTUATION_SPACE>',
u'\u2009': '<THIN_SPACE>',
u'\u200A': '<HAIR_SPACE>',
u'\u200B': '<ZERO_WIDTH_SPACE>',
u'\u202F': '<NNBSP>',
u'\u205F': '<MMSP>',
u'\u3000': '<IDEOGRAPHIC_SPACE>',
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White, self).__init__()
self.matchWhite = ws
self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite))
# ~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl(self, instring, loc, doActions=True):
if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min(maxloc, len(instring))
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
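# A minimal usage sketch for White (illustrative, not part of the library itself):
# treat a fixed run of spaces as a significant token instead of skipping it.
#
#     indent4 = White(' ', exact=4)
#     item = indent4 + Word(alphas)
#     print(item.parseString('    alpha'))   # -> ['    ', 'alpha']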
class _PositionToken(Token):
def __init__(self):
super(_PositionToken, self).__init__()
self.name = self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for
tabular report scraping.
"""
def __init__(self, colno):
super(GoToColumn, self).__init__()
self.col = colno
def preParse(self, instring, loc):
if col(loc, instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables(instring, loc)
while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col:
loc += 1
return loc
def parseImpl(self, instring, loc, doActions=True):
thiscol = col(loc, instring)
if thiscol > self.col:
raise ParseException(instring, loc, "Text not in expected column", self)
newloc = loc + self.col - thiscol
ret = instring[loc: newloc]
return newloc, ret
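# A minimal usage sketch for GoToColumn (illustrative): collect everything up to
# a fixed 1-based column, then parse the field that starts there.
#
#     num_at_11 = GoToColumn(11) + Word(nums)
#     print(num_at_11.parseString('ident     42'))   # -> ['ident     ', '42']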
class LineStart(_PositionToken):
r"""Matches if current position is at the beginning of a line within
the parse string
Example::
        test = '''\
        AAA this line
        AAA and this line
          AAA but not this one
        B AAA and definitely not this one
        '''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__(self):
super(LineStart, self).__init__()
self.errmsg = "Expected start of line"
def parseImpl(self, instring, loc, doActions=True):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the
parse string
"""
def __init__(self):
super(LineEnd, self).__init__()
self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", ""))
self.errmsg = "Expected end of line"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
if instring[loc] == "\n":
return loc + 1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
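# A minimal usage sketch for LineEnd (illustrative): anchor one record per line,
# consuming (and here suppressing) the terminating newline.
#
#     record = Word(alphas) + LineEnd().suppress()
#     print(OneOrMore(record).parseString('aaa\nbbb\n'))   # -> ['aaa', 'bbb']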
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse
string
"""
def __init__(self):
super(StringStart, self).__init__()
self.errmsg = "Expected start of text"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string
"""
def __init__(self):
super(StringEnd, self).__init__()
self.errmsg = "Expected end of text"
def parseImpl(self, instring, loc, doActions=True):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc + 1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
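# A minimal usage sketch for StringStart/StringEnd (illustrative): anchoring a
# grammar at both ends behaves like parseString(..., parseAll=True).
#
#     whole = StringStart() + Word(nums) + StringEnd()
#     whole.parseString('123')     # -> ['123']
#     whole.parseString('123 x')   # raises ParseException at the trailing 'x'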
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word,
and is not preceded by any character in a given set of
``wordChars`` (default= ``printables``). To emulate the
``\b`` behavior of regular expressions, use
``WordStart(alphanums)``. ``WordStart`` will also match at
the beginning of the string being parsed, or at the beginning of
a line.
"""
def __init__(self, wordChars=printables):
super(WordStart, self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True):
if loc != 0:
if (instring[loc - 1] in self.wordChars
or instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and is
not followed by any character in a given set of ``wordChars``
(default= ``printables``). To emulate the ``\b`` behavior of
regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
will also match at the end of the string being parsed, or at the end
of a line.
"""
def __init__(self, wordChars=printables):
super(WordEnd, self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True):
instrlen = len(instring)
if instrlen > 0 and loc < instrlen:
if (instring[loc] in self.wordChars or
instring[loc - 1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
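# A minimal usage sketch for WordStart/WordEnd (illustrative): match 'cat' only
# as a whole word, roughly like the regex r'\bcat\b'.
#
#     whole_cat = WordStart(alphanums) + Literal('cat') + WordEnd(alphanums)
#     print(whole_cat.searchString('cat catalog concat cat'))   # -> [['cat'], ['cat']]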
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and
post-processing parsed tokens.
"""
def __init__(self, exprs, savelist=False):
super(ParseExpression, self).__init__(savelist)
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, basestring):
self.exprs = [self._literalStringClass(exprs)]
elif isinstance(exprs, ParserElement):
self.exprs = [exprs]
elif isinstance(exprs, Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if any(isinstance(expr, basestring) for expr in exprs):
exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def append(self, other):
self.exprs.append(other)
self.strRepr = None
return self
def leaveWhitespace(self):
"""Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super(ParseExpression, self).ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def __str__(self):
try:
return super(ParseExpression, self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))
return self.strRepr
def streamline(self):
super(ParseExpression, self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d)
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if len(self.exprs) == 2:
other = self.exprs[0]
if (isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug):
self.exprs = other.exprs[:] + [self.exprs[1]]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def validate(self, validateTrace=None):
tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion([])
def copy(self):
ret = super(ParseExpression, self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
def _setResultsName(self, name, listAllMatches=False):
if __diag__.warn_ungrouped_named_tokens_in_collection:
for e in self.exprs:
if isinstance(e, ParserElement) and e.resultsName:
warnings.warn("{0}: setting results name {1!r} on {2} expression "
"collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName),
stacklevel=3)
return super(ParseExpression, self)._setResultsName(name, listAllMatches)
class And(ParseExpression):
"""
Requires all given :class:`ParseExpression` s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the ``'+'`` operator.
May also be constructed using the ``'-'`` operator, which will
suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"), name_expr("name"), integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop, self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__(self, exprs, savelist=True):
exprs = list(exprs)
if exprs and Ellipsis in exprs:
tmp = []
for i, expr in enumerate(exprs):
if expr is Ellipsis:
if i < len(exprs) - 1:
skipto_arg = (Empty() + exprs[i + 1]).exprs[-1]
tmp.append(SkipTo(skipto_arg)("_skipped*"))
else:
raise Exception("cannot construct And with sequence ending in ...")
else:
tmp.append(expr)
exprs[:] = tmp
super(And, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars(self.exprs[0].whiteChars)
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def streamline(self):
# collapse any _PendingSkip's
if self.exprs:
if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip)
for e in self.exprs[:-1]):
for i, e in enumerate(self.exprs[:-1]):
if e is None:
continue
if (isinstance(e, ParseExpression)
and e.exprs and isinstance(e.exprs[-1], _PendingSkip)):
e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
self.exprs[i + 1] = None
self.exprs = [e for e in self.exprs if e is not None]
super(And, self).streamline()
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False)
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse(instring, loc, doActions)
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse(instring, loc, doActions)
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
return self.append(other) # And([self, other])
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
if not e.mayReturnEmpty:
break
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the expression that matches the longest
string will be used. May be constructed using the ``'^'``
operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(Or, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self):
super(Or, self).streamline()
if __compat__.collect_all_And_tokens:
self.saveAsList = any(e.saveAsList for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse(instring, loc)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring, len(instring), e.errmsg, self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
# re-evaluate all matches in descending order of length of match, in case attached actions
# might change whether or how much they match of the input.
matches.sort(key=itemgetter(0), reverse=True)
if not doActions:
# no further conditions or parse actions to change the selection of
# alternative, so the first match will be the best match
best_expr = matches[0][1]
return best_expr._parse(instring, loc, doActions)
longest = -1, None
for loc1, expr1 in matches:
if loc1 <= longest[0]:
# already have a longer match than this one will deliver, we are done
return longest
try:
loc2, toks = expr1._parse(instring, loc, doActions)
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
else:
if loc2 >= loc1:
return loc2, toks
# didn't match as much as before
elif loc2 > longest[0]:
longest = loc2, toks
if longest != (-1, None):
return longest
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
return self.append(other) # Or([self, other])
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
def _setResultsName(self, name, listAllMatches=False):
if (not __compat__.collect_all_And_tokens
and __diag__.warn_multiple_tokens_in_named_alternation):
if any(isinstance(e, And) for e in self.exprs):
warnings.warn("{0}: setting results name {1!r} on {2} expression "
"may only return a single token for an And alternative, "
"in future will return the full list of tokens".format(
"warn_multiple_tokens_in_named_alternation", name, type(self).__name__),
stacklevel=3)
return super(Or, self)._setResultsName(name, listAllMatches)
class MatchFirst(ParseExpression):
"""Requires that at least one :class:`ParseExpression` is found. If
two expressions match, the first one listed is the one that will
match. May be constructed using the ``'|'`` operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__(self, exprs, savelist=False):
super(MatchFirst, self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def streamline(self):
super(MatchFirst, self).streamline()
if __compat__.collect_all_And_tokens:
self.saveAsList = any(e.saveAsList for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse(instring, loc, doActions)
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring, len(instring), e.errmsg, self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
return self.append(other) # MatchFirst([self, other])
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
def _setResultsName(self, name, listAllMatches=False):
if (not __compat__.collect_all_And_tokens
and __diag__.warn_multiple_tokens_in_named_alternation):
if any(isinstance(e, And) for e in self.exprs):
warnings.warn("{0}: setting results name {1!r} on {2} expression "
"may only return a single token for an And alternative, "
"in future will return the full list of tokens".format(
"warn_multiple_tokens_in_named_alternation", name, type(self).__name__),
stacklevel=3)
return super(MatchFirst, self)._setResultsName(name, listAllMatches)
class Each(ParseExpression):
"""Requires all given :class:`ParseExpression` s to be found, but in
any order. Expressions may be separated by whitespace.
May be constructed using the ``'&'`` operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
        color:GREEN size:20 shape:TRIANGLE posn:20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__(self, exprs, savelist=True):
super(Each, self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
self.saveAsList = True
def streamline(self):
super(Each, self).streamline()
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
return self
def parseImpl(self, instring, loc, doActions=True):
if self.initExprGroups:
self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional))
opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]
opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))]
self.optionals = opt1 + opt2
self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)]
self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)]
self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse(instring, tmpLoc)
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e), e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing)
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc, results = e._parse(instring, loc, doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion(self, parseElementList):
subRecCheckList = parseElementList[:] + [self]
for e in self.exprs:
e.checkRecursion(subRecCheckList)
class ParseElementEnhance(ParserElement):
    """Abstract subclass of :class:`ParserElement`, for wrapping and
    post-processing a single contained expression.
    """
def __init__(self, expr, savelist=False):
super(ParseElementEnhance, self).__init__(savelist)
if isinstance(expr, basestring):
if issubclass(self._literalStringClass, Token):
expr = self._literalStringClass(expr)
else:
expr = self._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars(expr.whiteChars)
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl(self, instring, loc, doActions=True):
if self.expr is not None:
return self.expr._parse(instring, loc, doActions, callPreParse=False)
else:
raise ParseException("", loc, self.errmsg, self)
def leaveWhitespace(self):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore(self, other):
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
else:
super(ParseElementEnhance, self).ignore(other)
if self.expr is not None:
self.expr.ignore(self.ignoreExprs[-1])
return self
def streamline(self):
super(ParseElementEnhance, self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion(self, parseElementList):
if self in parseElementList:
raise RecursiveGrammarException(parseElementList + [self])
subRecCheckList = parseElementList[:] + [self]
if self.expr is not None:
self.expr.checkRecursion(subRecCheckList)
def validate(self, validateTrace=None):
if validateTrace is None:
validateTrace = []
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
try:
return super(ParseElementEnhance, self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression.
``FollowedBy`` does *not* advance the parsing position within
the input string, it only verifies that the specified parse
expression matches at the current position. ``FollowedBy``
always returns a null token list. If any results names are defined
in the lookahead expression, those *will* be returned for access by
name.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__(self, expr):
super(FollowedBy, self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
        # by using self.expr._parse and deleting the contents of the returned ParseResults list
# we keep any named results that were defined in the FollowedBy expression
_, ret = self.expr._parse(instring, loc, doActions=doActions)
del ret[:]
return loc, ret
class PrecededBy(ParseElementEnhance):
"""Lookbehind matching of the given parse expression.
``PrecededBy`` does not advance the parsing position within the
input string, it only verifies that the specified parse expression
matches prior to the current position. ``PrecededBy`` always
returns a null token list, but if a results name is defined on the
given expression, it is returned.
Parameters:
- expr - expression that must match prior to the current parse
location
    - retreat - (default= ``None``) - (int) maximum number of characters
      to look back prior to the current parse location
If the lookbehind expression is a string, Literal, Keyword, or
a Word or CharsNotIn with a specified exact or maximum length, then
the retreat parameter is not required. Otherwise, retreat must be
specified to give a maximum number of characters to look back from
the current parse position for a lookbehind match.
Example::
# VB-style variable names with type prefixes
int_var = PrecededBy("#") + pyparsing_common.identifier
str_var = PrecededBy("$") + pyparsing_common.identifier
"""
def __init__(self, expr, retreat=None):
super(PrecededBy, self).__init__(expr)
self.expr = self.expr().leaveWhitespace()
self.mayReturnEmpty = True
self.mayIndexError = False
self.exact = False
if isinstance(expr, str):
retreat = len(expr)
self.exact = True
elif isinstance(expr, (Literal, Keyword)):
retreat = expr.matchLen
self.exact = True
elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
retreat = expr.maxLen
self.exact = True
elif isinstance(expr, _PositionToken):
retreat = 0
self.exact = True
self.retreat = retreat
self.errmsg = "not preceded by " + str(expr)
self.skipWhitespace = False
self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
def parseImpl(self, instring, loc=0, doActions=True):
if self.exact:
if loc < self.retreat:
raise ParseException(instring, loc, self.errmsg)
start = loc - self.retreat
_, ret = self.expr._parse(instring, start)
else:
# retreat specified a maximum lookbehind window, iterate
test_expr = self.expr + StringEnd()
instring_slice = instring[max(0, loc - self.retreat):loc]
last_expr = ParseException(instring, loc, self.errmsg)
for offset in range(1, min(loc, self.retreat + 1)+1):
try:
# print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
_, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
except ParseBaseException as pbe:
last_expr = pbe
else:
break
else:
raise last_expr
return loc, ret
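# A minimal usage sketch for PrecededBy (illustrative): accept a number only
# when it immediately follows a '$' (the '$' itself is not consumed).
#
#     dollar_amount = PrecededBy('$') + Word(nums)
#     print(dollar_amount.searchString('$123 vs 456'))   # -> [['123']]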
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression.
``NotAny`` does *not* advance the parsing position within the
input string, it only verifies that the specified parse expression
does *not* match at the current position. Also, ``NotAny`` does
*not* skip over leading whitespace. ``NotAny`` always returns
a null token list. May be constructed using the '~' operator.
Example::
AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
# take care not to mistake keywords for identifiers
ident = ~(AND | OR | NOT) + Word(alphas)
boolean_term = Optional(NOT) + ident
# very crude boolean expression - to support parenthesis groups and
# operation hierarchy, use infixNotation
boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
# integers that are followed by "." are actually floats
integer = Word(nums) + ~Char(".")
"""
def __init__(self, expr):
super(NotAny, self).__init__(expr)
# ~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__(self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = self._literalStringClass(ender)
self.stopOn(ender)
def stopOn(self, ender):
if isinstance(ender, basestring):
ender = self._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
return self
def parseImpl(self, instring, loc, doActions=True):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)
try:
hasIgnoreExprs = (not not self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables(instring, loc)
else:
preloc = loc
loc, tmptokens = self_expr_parse(instring, preloc, doActions)
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException, IndexError):
pass
return loc, tokens
def _setResultsName(self, name, listAllMatches=False):
if __diag__.warn_ungrouped_named_tokens_in_collection:
for e in [self.expr] + getattr(self.expr, 'exprs', []):
if isinstance(e, ParserElement) and e.resultsName:
warnings.warn("{0}: setting results name {1!r} on {2} expression "
"collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",
name,
type(self).__name__,
e.resultsName),
stacklevel=3)
return super(_MultipleMatch, self)._setResultsName(name, listAllMatches)
class OneOrMore(_MultipleMatch):
"""Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'posn' as data instead of next label -> [['shape', 'SQUARE posn']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default= ``None``) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to :class:`OneOrMore`
"""
def __init__(self, expr, stopOn=None):
super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException, IndexError):
return loc, []
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
Parameters:
- expr - expression that must match zero or more times
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
__optionalNotMatched = _NullToken()
def __init__(self, expr, default=__optionalNotMatched):
super(Optional, self).__init__(expr, savelist=False)
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl(self, instring, loc, doActions=True):
try:
loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
except (ParseException, IndexError):
if self.defaultValue is not self.__optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([self.defaultValue])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [self.defaultValue]
else:
tokens = []
return loc, tokens
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched
expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default= ``False``) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default= ``None``) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default= ``None``) define expressions that are not allowed to be
      included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
            print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__(self, other, include=False, ignore=None, failOn=None):
super(SkipTo, self).__init__(other)
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.saveAsList = False
if isinstance(failOn, basestring):
self.failOn = self._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for " + _ustr(self.expr)
def parseImpl(self, instring, loc, doActions=True):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the ``Forward``
variable using the '<<' operator.
Note: take care when assigning to ``Forward`` not to overlook
precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the ``Forward``::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See :class:`ParseResults.pprint` for an example of a recursive
parser created using ``Forward``.
"""
def __init__(self, other=None):
super(Forward, self).__init__(other, savelist=False)
def __lshift__(self, other):
if isinstance(other, basestring):
other = self._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars(self.expr.whiteChars)
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace(self):
self.skipWhitespace = False
return self
def streamline(self):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate(self, validateTrace=None):
if validateTrace is None:
validateTrace = []
if self not in validateTrace:
tmp = validateTrace[:] + [self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__(self):
if hasattr(self, "name"):
return self.name
if self.strRepr is not None:
return self.strRepr
# Avoid infinite recursion by setting a temporary strRepr
self.strRepr = ": ..."
# Use the string representation of main expression.
retString = '...'
try:
if self.expr is not None:
retString = _ustr(self.expr)[:1000]
else:
retString = "None"
finally:
self.strRepr = self.__class__.__name__ + ": " + retString
return self.strRepr
def copy(self):
if self.expr is not None:
return super(Forward, self).copy()
else:
ret = Forward()
ret <<= self
return ret
def _setResultsName(self, name, listAllMatches=False):
if __diag__.warn_name_set_on_empty_Forward:
if self.expr is None:
                warnings.warn("{0}: setting results name {1!r} on {2} expression "
                              "that has no contained expression".format("warn_name_set_on_empty_Forward",
                                                                        name,
                                                                        type(self).__name__),
                              stacklevel=3)
return super(Forward, self)._setResultsName(name, listAllMatches)
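# A minimal usage sketch for Forward (illustrative): a self-referencing grammar
# for arbitrarily nested parenthesized words.
#
#     nested = Forward()
#     nested <<= Suppress('(') + ZeroOrMore(Word(alphas) | nested) + Suppress(')')
#     print(nested.parseString('(a (b c) d)'))   # -> ['a', 'b', 'c', 'd']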
class TokenConverter(ParseElementEnhance):
"""
    Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
"""
def __init__(self, expr, savelist=False):
super(TokenConverter, self).__init__(expr) # , savelist)
self.saveAsList = False
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the
input string; this can be disabled by specifying
``'adjacent=False'`` in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__(self, expr, joinString="", adjacent=True):
super(Combine, self).__init__(expr)
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore(self, other):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super(Combine, self).ignore(other)
return self
def postParse(self, instring, loc, tokenlist):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [retToks]
else:
return retToks
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for
returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__(self, expr):
super(Group, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
return [tokenlist]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also
as a dictionary. Each element can also be referenced using the first
token in the expression as its key. Useful for tabular report
    scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at :class:`ParseResults` of accessing fields by results name.
"""
def __init__(self, expr):
super(Dict, self).__init__(expr)
self.saveAsList = True
def postParse(self, instring, loc, tokenlist):
for i, tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey, int):
ikey = _ustr(tok[0]).strip()
if len(tok) == 1:
tokenlist[ikey] = _ParseResultsWithOffset("", i)
elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
else:
dictvalue = tok.copy() # ParseResults(i)
del dictvalue[0]
if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
if self.resultsName:
return [tokenlist]
else:
return tokenlist
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also :class:`delimitedList`.)
"""
def postParse(self, instring, loc, tokenlist):
return []
def suppress(self):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self, s, l, t):
if not self.called:
results = self.callable(s, l, t)
self.called = True
return results
raise ParseException(s, l, "")
def reset(self):
self.called = False
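# A minimal usage sketch for OnlyOnce (illustrative): after the first successful
# call, further matches fail until the wrapper is reset().
#
#     once = OnlyOnce(lambda s, l, t: t)
#     wd = Word(alphas).setParseAction(once)
#     wd.parseString('first')    # ok
#     wd.parseString('second')   # raises ParseException - action already called
#     once.reset()
#     wd.parseString('third')    # ok again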
def traceParseAction(f):
"""Decorator for debugging parse actions.
When the parse action is called, this decorator will print
``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
When the parse action completes, the decorator will print
``"<<"`` followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s, l, t = paArgs[-3:]
if len(paArgs) > 3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t))
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
raise
sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList(expr, delim=",", combine=False):
"""Helper to define a delimited list of expressions - the delimiter
defaults to ','. By default, the list elements and delimiters can
have intervening whitespace, and comments, but this can be
overridden by passing ``combine=True`` in the constructor. If
``combine`` is set to ``True``, the matching tokens are
returned as a single token string, with the delimiters included;
otherwise, the matching tokens are returned as a list of tokens,
with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
if combine:
return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
else:
return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
def countedArray(expr, intExpr=None):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
The matched tokens returns the array of expr tokens as a list - the
leading count token is suppressed.
If ``intExpr`` is specified, it should be a pyparsing expression
that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s, l, t):
n = t[0]
arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
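# Illustrative behavior of the internal _flatten helper:
#
#     _flatten([1, [2, [3, 4]], 5])   # -> [1, 2, 3, 4, 5]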
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
    matches a previous literal, it will also match the leading
``"1:1"`` in ``"1:10"``. If this is not desired, use
:class:`matchPreviousExpr`. Do *not* use with packrat parsing
enabled.
"""
rep = Forward()
def copyTokenToRepeater(s, l, t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks for
a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match ``"1:1"``, but not ``"1:2"``. Because this
    matches by expressions, it will *not* match the leading ``"1:1"``
in ``"1:10"``; the expressions are evaluated first, and then
compared, so ``"1"`` is compared with ``"10"``. Do *not* use
with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s, l, t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s, l, t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException('', 0, '')
rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
# ~ escape these chars: ^-[]
for c in r"\^-[]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return _ustr(s)
def oneOf(strs, caseless=False, useRegex=True, asKeyword=False):
"""Helper to quickly define a set of alternative Literals, and makes
sure to do longest-first testing when there is a conflict,
regardless of the input order, but returns
a :class:`MatchFirst` for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of
string literals
- caseless - (default= ``False``) - treat all literals as
caseless
- useRegex - (default= ``True``) - as an optimization, will
generate a Regex object; otherwise, will generate
a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
creating a :class:`Regex` raises an exception)
- asKeyword - (default=``False``) - enforce Keyword-style matching on the
generated expressions
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if isinstance(caseless, basestring):
warnings.warn("More than one string argument passed to oneOf, pass "
"choices as a list or space-delimited string", stacklevel=2)
if caseless:
isequal = (lambda a, b: a.upper() == b.upper())
masks = (lambda a, b: b.upper().startswith(a.upper()))
parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
else:
isequal = (lambda a, b: a == b)
masks = (lambda a, b: b.startswith(a))
parseElementClass = Keyword if asKeyword else Literal
symbols = []
if isinstance(strs, basestring):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
if not asKeyword:
# if not producing keywords, need to reorder to take care to avoid masking
# longer choices with shorter ones
i = 0
while i < len(symbols) - 1:
cur = symbols[i]
for j, other in enumerate(symbols[i + 1:]):
if isequal(other, cur):
del symbols[i + j + 1]
break
elif masks(cur, other):
del symbols[i + j + 1]
symbols.insert(i, other)
break
else:
i += 1
if not (caseless or asKeyword) and useRegex:
# ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols]))
try:
if len(symbols) == len("".join(symbols)):
return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols))
else:
return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf(key, value):
"""Helper to easily and clearly define a dictionary by specifying
the respective patterns for the key and value. Takes care of
defining the :class:`Dict`, :class:`ZeroOrMore`, and
:class:`Group` tokens in the proper order. The key pattern
can include delimiting markers or punctuation, as long as they are
suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the :class:`Dict` results
can include named token fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict(OneOrMore(Group(key + value)))
def originalTextFor(expr, asString=True):
"""Helper to return the original, untokenized text for a given
expression. Useful to restore the parsed fields of an HTML start
tag into the raw tag text itself, or to revert separate tokens with
intervening whitespace back to the original matching input text. By
    default, returns a string containing the original parsed text.
If the optional ``asString`` argument is passed as
``False``, then the return value is
a :class:`ParseResults` containing any results names that
were originally matched, and a single token containing the original
matched text from the input string. So if the expression passed to
:class:`originalTextFor` contains expressions with defined
results names, you must set ``asString`` to ``False`` if you
want to preserve those results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b", "i"):
opener, closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s, loc, t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s, l, t: s[t._original_start: t._original_end]
else:
def extractText(s, l, t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""Helper to undo pyparsing's default grouping of And expressions,
even if all but one are non-empty.
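    Example (an illustrative sketch; ``num_pair`` is a name made up for
    this doc)::
        num_pair = Group(Word(nums) + ',' + Word(nums))
        print(num_pair.parseString("1, 2"))           # -> [['1', ',', '2']]
        print(ungroup(num_pair).parseString("1, 2"))  # -> ['1', ',', '2']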
"""
return TokenConverter(expr).addParseAction(lambda t: t[0])
def locatedExpr(expr):
"""Helper to decorate a returned token with its starting and ending
locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains ``<TAB>`` characters, you
may want to call :class:`ParserElement.parseWithTabs`
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s, l, t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]"
def srange(s):
r"""Helper to easily define string ranges for use in Word
construction. Borrows syntax from regexp '[]' string range
definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string
is the expanded character set joined into a single string. The
values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as ``\-``
or ``\]``)
- an escaped hex character with a leading ``'\x'``
(``\x21``, which is a ``'!'`` character) (``\0x##``
is also supported for backwards compatibility)
- an escaped octal character with a leading ``'\0'``
(``\041``, which is a ``'!'`` character)
- a range of any of the above, separated by a dash (``'a-z'``,
etc.)
- any combination of the above (``'aeiouy'``,
``'a-zA-Z0-9_$'``, etc.)
"""
_expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at
a specific column in the input text.
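    Example (an illustrative sketch; the expression name is made up)::
        # accept a numeric word only if it starts in column 5
        num_in_col5 = Word(nums).setParseAction(matchOnlyAtCol(5))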
"""
def verifyCol(strg, locn, toks):
if col(locn, strg) != n:
raise ParseException(strg, locn, "matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return
a literal value. Especially useful when used with
:class:`transformString<ParserElement.transformString>` ().
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s, l, t: [replStr]
def removeQuotes(s, l, t):
"""Helper parse action for removing quotation marks from parsed
quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""Helper to define a parse action by mapping a function to all
elements of a ParseResults list. If any additional args are passed,
they are forwarded to the given function as additional arguments
after the token, as in
``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
which will convert the parsed data to an integer using base 16.
    Example (compare the last example to the one in :class:`ParserElement.transformString`)::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s, l, t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case.
Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case.
Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
def _makeTags(tagStr, xml,
suppress_LT=Suppress("<"),
suppress_GT=Suppress(">")):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr, basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas, alphanums + "_-:")
if xml:
tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
openTag = (suppress_LT
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
+ suppress_GT)
else:
tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">")
openTag = (suppress_LT
+ tagStr("tag")
+ Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
+ Optional(Suppress("=") + tagAttrValue))))
+ Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
+ suppress_GT)
closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
openTag.setName("<%s>" % resname)
# add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy()))
closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
openTag.tag_body = SkipTo(closeTag())
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML,
given a tag name. Matches tags in either upper or lower case,
attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and
# closing tags as a 2-tuple
a, a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are
# also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> https://github.com/pyparsing/pyparsing/wiki
"""
return _makeTags(tagStr, False)
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML,
given a tag name. Matches tags only in the given upper/lower case.
Example: similar to :class:`makeHTMLTags`
"""
return _makeTags(tagStr, True)
def withAttribute(*args, **attrDict):
"""Helper to create a validating parse action to be used with start
tags created with :class:`makeXMLTags` or
:class:`makeHTMLTags`. Use ``withAttribute`` to qualify
a starting tag with a required attribute value, to avoid false
matches on common tags such as ``<TD>`` or ``<DIV>``.
Call ``withAttribute`` with a series of attribute names and
values. Specify the list of filter attributes names and values as:
- keyword arguments, as in ``(align="right")``, or
- as an explicit dict with ``**`` operator, when an attribute
name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
- a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
For attribute names with a namespace prefix, you must use the second
form. Attribute names are matched insensitive to upper/lower case.
If just testing for ``class`` (with or without a namespace), use
:class:`withClass`.
To verify that the attribute exists, but without specifying a value,
pass ``withAttribute.ANY_VALUE`` as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k, v) for k, v in attrs]
def pa(s, l, tokens):
for attrName, attrValue in attrs:
if attrName not in tokens:
raise ParseException(s, l, "no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""Simplified version of :class:`withAttribute` when
matching on a div class - made difficult because ``class`` is
a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr: classname})
opAssoc = SimpleNamespace()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary
or binary, left- or right-associative. Parse actions can also be
attached to operator expressions. The generated parser will also
recognize the use of parentheses to override operator precedences
(see example below).
Note: if you define a deep operator list, you may see performance
issues when using infixNotation. See
:class:`ParserElement.enablePackrat` for a mechanism to potentially
improve your parser performance.
Parameters:
- baseExpr - expression representing the most basic element for the
nested
- opList - list of tuples, one for each operator precedence level
in the expression grammar; each tuple is of the form ``(opExpr,
numTerms, rightLeftAssoc, parseAction)``, where:
- opExpr is the pyparsing expression for the operator; may also
be a string, which will be converted to a Literal; if numTerms
is 3, opExpr is a tuple of two expressions, for the two
operators separating the 3 terms
- numTerms is the number of terms for this operator (must be 1,
2, or 3)
- rightLeftAssoc is the indicator whether the operator is right
or left associative, using the pyparsing-defined constants
``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the parse action
tuple member may be omitted); if the parse action is passed
a tuple or list of functions, this is equivalent to calling
``setParseAction(*fn)``
(:class:`ParserElement.setParseAction`)
- lpar - expression for matching left-parentheses
(default= ``Suppress('(')``)
- rpar - expression for matching right-parentheses
(default= ``Suppress(')')``)
Example::
# simple example of four-function arithmetic with ints and
# variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
# captive version of FollowedBy that does not do parse actions or capture results names
class _FB(FollowedBy):
def parseImpl(self, instring, loc, doActions=True):
self.expr.tryParse(instring, loc)
return loc, []
ret = Forward()
lastExpr = baseExpr | (lpar + ret + rpar)
for i, operDef in enumerate(opList):
opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr))
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr))
else:
matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr))
elif arity == 3:
matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)
+ Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)))
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
elif arity == 2:
if opExpr is not None:
matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr))
else:
matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr))
elif arity == 3:
matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
+ Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr))
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.setParseAction(*pa)
else:
matchExpr.setParseAction(pa)
thisExpr <<= (matchExpr.setName(termName) | lastExpr)
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of :class:`infixNotation`, will be
dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
| Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""Helper method for defining nested lists enclosed in opening and
closing delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list
(default= ``"("``); can also be a pyparsing expression
- closer - closing character for a nested list
(default= ``")"``); can also be a pyparsing expression
- content - expression for items within the nested lists
(default= ``None``)
- ignoreExpr - expression for ignoring opening and closing
delimiters (default= :class:`quotedString`)
If an expression is not provided for the content argument, the
nested expression will capture all whitespace-delimited content
between delimiters as a list of separate values.
Use the ``ignoreExpr`` argument to define expressions that may
contain opening or closing characters that should not be treated as
opening or closing characters for nesting, such as quotedString or
a comment expression. Specify multiple expressions using an
:class:`Or` or :class:`MatchFirst`. The default is
:class:`quotedString`, but if no expressions are to be ignored, then
pass ``None`` for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR, RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener, basestring) and isinstance(closer, basestring):
if len(opener) == 1 and len(closer) == 1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr
+ CharsNotIn(opener
+ closer
+ ParserElement.DEFAULT_WHITE_CHARS, exact=1)
)
).setParseAction(lambda t: t[0].strip()))
else:
content = (empty.copy() + CharsNotIn(opener
+ closer
+ ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t: t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr
+ ~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))
).setParseAction(lambda t: t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener)
+ ~Literal(closer)
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))
).setParseAction(lambda t: t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer))
else:
ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
ret.setName('nested %s%s expression' % (opener, closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""Helper method for defining space-delimited indentation blocks,
such as those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single
grammar should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond
the current level; set to False for block of left-most
statements (default= ``True``)
A valid block must contain at least one ``blockStatement``.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group(funcDecl + func_body)
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << (funcDef | assignment | identifier)
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
backup_stack = indentStack[:]
def reset_stack():
indentStack[:] = backup_stack
def checkPeerIndent(s, l, t):
if l >= len(s): return
curCol = col(l, s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s, l, t):
curCol = col(l, s)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s, l, t):
if l >= len(s): return
curCol = col(l, s)
if not(indentStack and curCol in indentStack):
raise ParseException(s, l, "not an unindent")
if curCol < indentStack[-1]:
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group(Optional(NL)
+ INDENT
+ OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
+ UNDENT)
else:
smExpr = Group(Optional(NL)
+ OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
+ UNDENT)
smExpr.setFailAction(lambda a, b, c, d: reset_stack())
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
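# illustrative usage (a sketch, not executed at import time):
#     commonHTMLEntity.setParseAction(replaceHTMLEntity)
#     commonHTMLEntity.transformString("a &lt; b &amp; c")  # -> 'a < b & c'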
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form ``/* ... */``"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form ``<!-- ... -->``"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form ``// ... (to end of line)``"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
javaStyleComment = cppStyleComment
"Same as :class:`cppStyleComment`"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form ``# ... (to end of line)``"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')
+ Optional(Word(" \t")
+ ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or
quoted strings, separated by commas.
This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
"""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""Here are some common low-level expressions that may be useful in
jump-starting parser development:
- numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
:class:`scientific notation<sci_real>`)
- common :class:`programming identifiers<identifier>`
- network addresses (:class:`MAC<mac_address>`,
:class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
- ISO8601 :class:`dates<iso8601_date>` and
:class:`datetime<iso8601_datetime>`
- :class:`UUID<uuid>`
- :class:`comma-separated list<comma_separated_list>`
Parse actions:
- :class:`convertToInteger`
- :class:`convertToFloat`
- :class:`convertToDate`
- :class:`convertToDatetime`
- :class:`stripHTMLTags`
- :class:`upcaseTokens`
- :class:`downcaseTokens`
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional
scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas + '_', alphanums + '_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (``0.0.0.0 - 255.255.255.255``)"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))
+ "::"
+ Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))
).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""Helper to create a parse action for converting parsed
datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s, l, t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (``yyyy-mm-dd``)"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
td, td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body)
Prints::
More info at the pyparsing wiki page
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",")
+ ~LineEnd()
+ Word(printables, excludeChars=',')
+ Optional(White(" \t")))).streamline().setName("commaItem")
comma_separated_list = delimitedList(Optional(quotedString.copy()
| _commasepitem, default='')
).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
class _lazyclassproperty(object):
def __init__(self, fn):
self.fn = fn
self.__doc__ = fn.__doc__
self.__name__ = fn.__name__
def __get__(self, obj, cls):
if cls is None:
cls = type(obj)
if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', [])
for superclass in cls.__mro__[1:]):
cls._intern = {}
attrname = self.fn.__name__
if attrname not in cls._intern:
cls._intern[attrname] = self.fn(cls)
return cls._intern[attrname]
class unicode_set(object):
"""
A set of Unicode characters, for language-specific strings for
``alphas``, ``nums``, ``alphanums``, and ``printables``.
A unicode_set is defined by a list of ranges in the Unicode character
set, in a class attribute ``_ranges``, such as::
_ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
A unicode set can also be defined using multiple inheritance of other unicode sets::
class CJK(Chinese, Japanese, Korean):
pass
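    The generated character strings can be used anywhere pyparsing accepts
    the ASCII-only ``alphas``/``nums`` constants (an illustrative sketch)::
        greek_word = Word(pyparsing_unicode.Greek.alphas)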
"""
_ranges = []
@classmethod
def _get_chars_for_ranges(cls):
ret = []
for cc in cls.__mro__:
if cc is unicode_set:
break
for rr in cc._ranges:
ret.extend(range(rr[0], rr[-1] + 1))
return [unichr(c) for c in sorted(set(ret))]
@_lazyclassproperty
def printables(cls):
"all non-whitespace characters in this range"
return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))
@_lazyclassproperty
def alphas(cls):
"all alphabetic characters in this range"
return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
@_lazyclassproperty
def nums(cls):
"all numeric digit characters in this range"
return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges()))
@_lazyclassproperty
def alphanums(cls):
"all alphanumeric characters in this range"
return cls.alphas + cls.nums
class pyparsing_unicode(unicode_set):
"""
A namespace class for defining common language unicode_sets.
"""
_ranges = [(32, sys.maxunicode)]
class Latin1(unicode_set):
"Unicode set for Latin-1 Unicode Character Range"
_ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
class LatinA(unicode_set):
"Unicode set for Latin-A Unicode Character Range"
_ranges = [(0x0100, 0x017f),]
class LatinB(unicode_set):
"Unicode set for Latin-B Unicode Character Range"
_ranges = [(0x0180, 0x024f),]
class Greek(unicode_set):
"Unicode set for Greek Unicode Character Ranges"
_ranges = [
(0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d),
(0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4),
(0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe),
]
class Cyrillic(unicode_set):
"Unicode set for Cyrillic Unicode Character Range"
_ranges = [(0x0400, 0x04ff)]
class Chinese(unicode_set):
"Unicode set for Chinese Unicode Character Range"
_ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),]
class Japanese(unicode_set):
"Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
_ranges = []
class Kanji(unicode_set):
"Unicode set for Kanji Unicode Character Range"
_ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),]
class Hiragana(unicode_set):
"Unicode set for Hiragana Unicode Character Range"
_ranges = [(0x3040, 0x309f),]
class Katakana(unicode_set):
"Unicode set for Katakana Unicode Character Range"
_ranges = [(0x30a0, 0x30ff),]
class Korean(unicode_set):
"Unicode set for Korean Unicode Character Range"
_ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),]
class CJK(Chinese, Japanese, Korean):
"Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
pass
class Thai(unicode_set):
"Unicode set for Thai Unicode Character Range"
_ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),]
class Arabic(unicode_set):
"Unicode set for Arabic Unicode Character Range"
_ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),]
class Hebrew(unicode_set):
"Unicode set for Hebrew Unicode Character Range"
_ranges = [(0x0590, 0x05ff),]
class Devanagari(unicode_set):
"Unicode set for Devanagari Unicode Character Range"
_ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)]
pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges
+ pyparsing_unicode.Japanese.Hiragana._ranges
+ pyparsing_unicode.Japanese.Katakana._ranges)
# define ranges in language character sets
if PY_3:
setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic)
setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese)
setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic)
setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek)
setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew)
setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese)
setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji)
setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana)
setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana)
setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean)
setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai)
setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari)
class pyparsing_test:
"""
namespace class for classes useful in writing unit tests
"""
class reset_pyparsing_context:
"""
Context manager to be used when writing unit tests that modify pyparsing config values:
- packrat parsing
         - default whitespace characters
- default keyword characters
- literal string auto-conversion class
- __diag__ settings
Example:
with reset_pyparsing_context():
# test that literals used to construct a grammar are automatically suppressed
ParserElement.inlineLiteralsUsing(Suppress)
term = Word(alphas) | Word(nums)
group = Group('(' + term[...] + ')')
# assert that the '()' characters are not included in the parsed tokens
                self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
# after exiting context manager, literals are converted to Literal expressions again
"""
def __init__(self):
self._save_context = {}
def save(self):
self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
self._save_context[
"literal_string_class"
] = ParserElement._literalStringClass
self._save_context["packrat_enabled"] = ParserElement._packratEnabled
self._save_context["packrat_parse"] = ParserElement._parse
self._save_context["__diag__"] = {
name: getattr(__diag__, name) for name in __diag__._all_names
}
self._save_context["__compat__"] = {
"collect_all_And_tokens": __compat__.collect_all_And_tokens
}
return self
def restore(self):
# reset pyparsing global state
if (
ParserElement.DEFAULT_WHITE_CHARS
!= self._save_context["default_whitespace"]
):
ParserElement.setDefaultWhitespaceChars(
self._save_context["default_whitespace"]
)
Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
ParserElement.inlineLiteralsUsing(
self._save_context["literal_string_class"]
)
for name, value in self._save_context["__diag__"].items():
setattr(__diag__, name, value)
ParserElement._packratEnabled = self._save_context["packrat_enabled"]
ParserElement._parse = self._save_context["packrat_parse"]
__compat__.collect_all_And_tokens = self._save_context["__compat__"]
def __enter__(self):
return self.save()
def __exit__(self, *args):
return self.restore()
class TestParseResultsAsserts:
"""
A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
"""
def assertParseResultsEquals(
self, result, expected_list=None, expected_dict=None, msg=None
):
"""
Unit test assertion to compare a ParseResults object with an optional expected_list,
and compare any defined results names with an optional expected_dict.
"""
if expected_list is not None:
self.assertEqual(expected_list, result.asList(), msg=msg)
if expected_dict is not None:
self.assertEqual(expected_dict, result.asDict(), msg=msg)
def assertParseAndCheckList(
self, expr, test_string, expected_list, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ParseResults.asList() is equal to the expected_list.
"""
result = expr.parseString(test_string, parseAll=True)
if verbose:
print(result.dump())
self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
def assertParseAndCheckDict(
self, expr, test_string, expected_dict, msg=None, verbose=True
):
"""
Convenience wrapper assert to test a parser element and input string, and assert that
the resulting ParseResults.asDict() is equal to the expected_dict.
"""
result = expr.parseString(test_string, parseAll=True)
if verbose:
print(result.dump())
self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
def assertRunTestResults(
self, run_tests_report, expected_parse_results=None, msg=None
):
"""
Unit test assertion to evaluate output of ParserElement.runTests(). If a list of
list-dict tuples is given as the expected_parse_results argument, then these are zipped
with the report tuples returned by runTests and evaluated using assertParseResultsEquals.
Finally, asserts that the overall runTests() success value is True.
:param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
:param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
"""
run_test_success, run_test_results = run_tests_report
if expected_parse_results is not None:
merged = [
(rpt[0], rpt[1], expected)
for rpt, expected in zip(run_test_results, expected_parse_results)
]
for test_string, result, expected in merged:
# expected should be a tuple containing a list and/or a dict or an exception,
# and optional failure message string
# an empty tuple will skip any result validation
fail_msg = next(
(exp for exp in expected if isinstance(exp, str)), None
)
expected_exception = next(
(
exp
for exp in expected
if isinstance(exp, type) and issubclass(exp, Exception)
),
None,
)
if expected_exception is not None:
with self.assertRaises(
expected_exception=expected_exception, msg=fail_msg or msg
):
if isinstance(result, Exception):
raise result
else:
expected_list = next(
(exp for exp in expected if isinstance(exp, list)), None
)
expected_dict = next(
(exp for exp in expected if isinstance(exp, dict)), None
)
if (expected_list, expected_dict) != (None, None):
self.assertParseResultsEquals(
result,
expected_list=expected_list,
expected_dict=expected_dict,
msg=fail_msg or msg,
)
else:
# warning here maybe?
print("no validation for {!r}".format(test_string))
# do this last, in case some specific test results can be reported instead
self.assertTrue(
run_test_success, msg=msg if msg is not None else "failed runTests"
)
@contextmanager
def assertRaisesParseException(self, exc_type=ParseException, msg=None):
with self.assertRaises(exc_type, msg=msg):
yield
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
|
sserrot/champion_relationships
|
venv/Lib/site-packages/pyparsing.py
|
Python
|
mit
| 273,365
|
[
"VisIt"
] |
a315ff64ecfcb7e7aba2cf94598ee726071d5200fead0e770a4d2aa7bfefdc88
|
"""
BLAST-related classes and functions
"""
from .BlastTools import create_blast_map, get_blast_map
from .Blast import Blast
from .BlastMapper import BlastMapper
from .ParallelBlast import ParallelBlast
from .ParseBlast import ParseBlast
from .ParseParallelBlast import ParseParallelBlast
|
ajrichards/htsint
|
htsint/blast/__init__.py
|
Python
|
bsd-3-clause
| 290
|
[
"BLAST"
] |
9e1323814760c00a39538f2de610be38cf1eef3925fd2d0c177a2f8921e6a00f
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 20:20:03 2018
@author: BallBlueMeercat
"""
import time
import dnest4
import numpy as np
import pandas as pd
import numpy.random as rng
import pickle
#from numba import jitclass, int32
import datasim
import results
import tools
#from scipy.special import erf
# speed of the run: sampling from prior = 0, short = 1, medium = 2, long = 3
speed = 1
# Sigma of the noise on data.
sigma = 0.07
# Loading data:
#dataname = 'mag_z_LCDM_1000_sigma_'+str(sigma)
#mag, zpicks = results.load('./data', dataname)
dataname = './data/lcparam_full_long.txt'
pantheon = pd.read_csv(dataname, sep=" ")
# Reading each txt file column of interest as numpy.ndarray
mag = pantheon.mb.values
x1 = pantheon.x1.values
colour = pantheon.color.values
zpicks = pantheon.zhel.values
# Sorting all columns by ascending redshift.
# (sorting the stacked array along axis=-1 would sort each row
# independently and break the mag/x1/colour/z correspondence,
# so reorder every column by the redshift ordering instead)
order = np.argsort(zpicks)
mag = mag[order]
x1 = x1[order]
colour = colour[order]
zpicks = zpicks[order]
zpicks = zpicks.tolist()
data_dict = {'mag':mag, 'x1':x1, 'colour':colour, 'zpicks':zpicks}
class Model(object):
"""
Specify the model in Python.
"""
def __init__(self, g_lim=None):
"""
Parameter values *are not* stored inside the class
"""
self.M_min = -20
self.M_max = -18
self.a_min = -20
self.a_max = 20
self.b_min = -20
self.b_max = 20
        self.g_lim = g_lim
        if g_lim is not None:
            self.g_min = g_lim[0]
            self.g_max = g_lim[1]
def from_prior(self):
"""
Unlike in C++, this must *return* a numpy array of parameters.
"""
m = rng.rand()
M = 1E3*rng.rand()
M = dnest4.wrap(M, self.M_min, self.M_max)
a = 1E3*rng.rand()
a = dnest4.wrap(a, self.a_min, self.a_max)
b = 1E3*rng.rand()
b = dnest4.wrap(b, self.b_min, self.b_max)
        if self.g_lim is not None:
g = 1E3*rng.rand()
g = dnest4.wrap(g, self.g_min, self.g_max)
return np.array([m, M, a, b, g])
return np.array([m, M, a, b])
def perturb(self, params):
"""
Unlike in C++, this takes a numpy array of parameters as input,
and modifies it in-place. The return value is still logH.
"""
logH = 0.0
which = rng.randint(len(params))
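        # pick one parameter at random; dnest4.randh() draws a heavy-tailed
        # step so both small refinements and large jumps get proposed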
# Note the difference between dnest4.wrap in Python and
# DNest4::wrap in C++. The former *returns* the wrapped value.
if which == 0:
params[which] += dnest4.randh()
params[which] = dnest4.wrap(params[which], 0.0, 1.0)
elif which == 1:
params[which] += dnest4.randh()
params[which] = dnest4.wrap(params[which], self.M_min, self.M_max)
elif which == 2:
params[which] += dnest4.randh()
params[which] = dnest4.wrap(params[which], self.a_min, self.a_max)
elif which == 3:
params[which] += dnest4.randh()
params[which] = dnest4.wrap(params[which], self.b_min, self.b_max)
elif which == 4:
params[which] += dnest4.randh()
params[which] = dnest4.wrap(params[which], self.g_min, self.g_max)
return logH
def log_likelihood(self, params):
"""
Gaussian sampling distribution.
"""
if len(params) > 4:
m, M, a, b, g = params
theta = {'m':m, 'M':M, 'a':a, 'b':b, 'gamma':g}
else:
m, M, a, b = params
theta = {'m':m, 'M':M, 'a':a, 'b':b}
model = datasim.magn(theta, data_dict, key)
var = sigma**2.0
        return -0.5*np.sum((mag - model)**2.0/var + np.log(2.0*np.pi*var))
# def randh(self):
# """
# Generate from the heavy-tailed distribution.
# """
# a = np.random.randn()
# b = np.random.rand()
# t = a/np.sqrt(-np.log(b))
# n = np.random.randn()
# return 10.0**(1.5 - 3*np.abs(t))*n
#
# def wrap(self, x, a, b):
# assert b > a
# return (x - a)%(b - a) + a
firstderivs_functions = [None
,'exotic'
,'late_intxde'
,'heaviside_late_int'
,'late_int'
,'expgamma'
,'txgamma' # doesn't converge
,'zxgamma'
,'gamma_over_z' # doesn't converge
,'zxxgamma' # gamma forced positive in firstderivs
,'gammaxxz' # gamma forced positive in firstderivs
,'rdecay_m'
,'rdecay_de'
,'rdecay_mxde'
,'rdecay'
,'interacting'
,'LCDM'
]
for key in firstderivs_functions:
if key:
if key == 'exotic':
g_lim = [-1.5, 0.1]
elif key == 'late_intxde':
g_lim = [-2, 0.1]
elif key == 'heaviside_late_int':
g_lim = [-1.45, 0.1]
elif key == 'late_int':
g_lim = [-15, 0.1]
elif key == 'expgamma':
g_lim = [-0.1, 1.5]
elif key == 'txgamma':
g_lim = [-0.5, 0.1]
elif key == 'zxgamma':
g_lim = [-10, 0.1]
elif key == 'zxxgamma':
g_lim = [-0.1, 12]
elif key == 'gammaxxz':
g_lim = [-1, 1]
elif key == 'rdecay_m':
g_lim = [-3, 0]
elif key == 'rdecay':
g_lim = [-2, 0]
elif key == 'interacting':
g_lim = [-1.5, 0.1]
elif key == 'LCDM':
g_lim = None
else:
g_lim = [-10,10]
# Create a model object and a sampler
model = Model(g_lim)
sampler = dnest4.DNest4Sampler(model,
backend=dnest4.backends.CSVBackend(".",
sep=" "))
if speed == 3:
# LONG Set up the sampler. The first argument is max_num_levels
gen = sampler.sample(max_num_levels=30, num_steps=1000,
new_level_interval=10000, num_per_step=10000,
thread_steps=100, num_particles=5,
lam=10, beta=100, seed=1234)
elif speed == 2:
# MEDIUM num_per_step can be down to a few thousand
gen = sampler.sample(max_num_levels=30, num_steps=1000,
new_level_interval=1000, num_per_step=1000,
thread_steps=100, num_particles=5,
lam=10, beta=100, seed=1234)
elif speed == 1:
# SHORT
gen = sampler.sample(max_num_levels=30, num_steps=100,
new_level_interval=100, num_per_step=100,
thread_steps=10, num_particles=5,
lam=10, beta=100, seed=1234)
elif speed == 0:
# SHORT, sampling from prior
gen = sampler.sample(max_num_levels=1, num_steps=1000,
new_level_interval=100, num_per_step=100,
thread_steps=10, num_particles=5,
lam=10, beta=100, seed=1234)
# import cProfile, pstats, io
# pr = cProfile.Profile()
# pr.enable()
ti = time.time()
# Do the sampling (one iteration here = one particle save)
for i, sample in enumerate(gen):
# print("# Saved {k} particles.".format(k=(i+1)))
pass
tf = time.time()
# pr.disable()
# s = io.StringIO()
# sortby = 'cumulative'
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print (s.getvalue())
dnest_time = tools.timer('Bfactor', ti, tf)
print('testing =',key)
print('data =', dataname)
print('sigma =', sigma)
# Run the postprocessing
info = dnest4.postprocess()
if speed > 1:
f = open('brief.txt','w')
f.write(dnest_time +'\n'
+'model = '+key +'\n'
+'data = '+ dataname +'\n'
+'sigma = '+str(sigma) +'\n'
+'log(Z) = '+str(info[0]) +'\n'
+'Information = '+str(info[1]) +'\n'
+'speed = '+str(speed))
f.close()
pickle.dump(info[0], open('evidence.p', 'wb'))
# Moving output .txt files into a run specific folder.
results.relocate('evidence.p', speed, key)
results.relocate('levels.txt', speed, key)
results.relocate('posterior_sample.txt', speed, key)
results.relocate('sample_info.txt', speed, key)
results.relocate('sample.txt', speed, key)
results.relocate('sampler_state.txt', speed, key)
results.relocate('weights.txt', speed, key)
results.relocate('brief.txt', speed, key)
results.relocate('plot_1.pdf', speed, key)
results.relocate('plot_2.pdf', speed, key)
results.relocate('plot_3.pdf', speed, key)
else:
# Histogram of parameters found by DNest4.
array = np.loadtxt('sample.txt')
import matplotlib.pyplot as plt
if key == 'LCDM':
plt.figure()
plt.title('matter')
plt.hist(array[:,0])
plt.show()
plt.figure()
plt.title('M_b')
plt.hist(array[:,1])
plt.show()
plt.figure()
plt.title('alpha')
plt.hist(array[:,2])
plt.show()
plt.figure()
plt.title('beta')
plt.hist(array[:,3])
plt.show()
else:
plt.figure()
plt.title('matter')
plt.hist(array[:,0])
plt.show()
plt.figure()
plt.title('M_b')
plt.hist(array[:,1])
plt.show()
plt.figure()
plt.title('alpha')
plt.hist(array[:,2])
plt.show()
plt.figure()
plt.title('beta')
plt.hist(array[:,3])
plt.show()
plt.figure()
plt.title('gamma')
plt.hist(array[:,4])
plt.show()
#import six
#import sys
## Run the postprocessing to get marginal likelihood and generate posterior
#samples logZdnest4, infogaindnest4, plot = dnest4.postprocess()
#
#postsamples = np.loadtxt('posterior_sample.txt')
#
#print(six.u('Marginalised evidence is {}'.format(logZdnest4)))
#
#print('Number of posterior samples is {}'.format(postsamples.shape[0]))
#
## plot posterior samples (if corner.py is installed)
#try:
# import matplotlib as mpl
# mpl.use("Agg") # force Matplotlib backend to Agg
# import corner # import corner.py
#except ImportError:
# sys.exit(1)
#
#m = 0.3
#g=0
#fig = corner.corner(postsamples, labels=[r"$m$", r"$c$"], truths=[m, g])
#fig.savefig('DNest4.png')
# LCDM
#log(Z) = -1622866.8534441872
#Information = 14.078678027261049 nats.
#Effective sample size = 129.22232212112772
#time 297min 50s
#log(Z) = -1622866.790641218
#Information = 13.905435690656304 nats.
#Effective sample size = 167.73507536834273
#time 34 min
#rdecay
#log(Z) = -1622866.8177826053
#Information = 13.970533838961273 nats.
#Effective sample size = 85.54638980461822
#Sampling time: 37min 5s
############ 0.01 sigma data
#Hdecay
#Sampling time: 38min 57s
#log(Z) = -1158842.6212481956
#Information = 26.434626991627738 nats.
#Effective sample size = 116.96489141639181
#edecay
#Sampling time: 45min 57s
#log(Z) = -49925.259544267705
#Information = 19.683044903278642 nats.
#Effective sample size = 162.7283801030449
#LCDM
#Sampling time: 31min 52s
#log(Z) = -1622866.7230921672
#Information = 13.870062695583329 nats.
#Effective sample size = 178.67158154325102
############ 0.1 sigma data
#Hdecay
#Sampling time: 26min 26s
#data = mag_z_LCDM_1000_sigma_0.1
#sigma = 0.1
#log(Z) = -11392.938034458695
#Information = 16.85219457607309 nats.
#Effective sample size = 216.9365844057018
#rdecay
#Sampling time: 25min 4s
#data = mag_z_LCDM_1000_sigma_0.1
#sigma = 0.1
#log(Z) = -16069.573635539238
#Information = 8.730470507740392 nats.
#Effective sample size = 172.4071834775586
#LCDM
#Sampling time: 23min 45s
#data = mag_z_LCDM_1000_sigma_0.1
#sigma = 0.1
#log(Z) = -16070.356294581907
#Information = 9.449718869756907 nats.
#Effective sample size = 142.47418654118337
|
lefthandedroo/Cosmo-models
|
zprev versions/Models_py_backup/Models backup/Bfactor.py
|
Python
|
mit
| 13,227
|
[
"Gaussian"
] |
00d1e908a6ee0ab82585cf253620b977d71de43da7892367338380394029b61f
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c), 2016-2017, Quantum Espresso Foundation and SISSA (Scuola
# Internazionale Superiore di Studi Avanzati). All rights reserved.
# This file is distributed under the terms of the LGPL-2.1 license. See the
# file 'LICENSE' in the root directory of the present distribution, or
# https://opensource.org/licenses/LGPL-2.1
#
#
from ase import Atoms
from postqe.ase.io import read_espresso_output
if __name__ == "__main__":
from ase.calculators.calculator import FileIOCalculator, Calculator, kpts2ndarray
Ni = read_espresso_output('Ni.xml')
    #test = kpts2ndarray({'path': 'GXG', 'npoints': 200}, Ni)
    test = kpts2ndarray([2, 2, 2], Ni)
    print(test)
exit()
from postqe.ase.calculator import PostqeCalculator
from ase.visualize import view
system = 'Ni'
test = read_espresso_output(system + '.xml')
test.set_calculator(PostqeCalculator(label = system))
print (test.get_atomic_numbers())
print (test.get_cell(True))
print (test.get_positions())
print (test.get_volume())
#view(test)
#Ni2.calc.read_results()
print(test.get_potential_energy())
print(test.calc.get_xc_functional())
print(test.calc.get_number_of_spins())
print(test.calc.get_spin_polarized())
print(test.calc.get_fermi_level())
print(test.calc.get_number_of_bands())
print(test.calc.get_bz_k_points())
print(test.calc.get_k_point_weights())
print(test.calc.get_eigenvalues(0,0))
print(test.calc.get_occupation_numbers(0,0))
print(test.calc.get_eigenvalues(0,1))
print(test.calc.get_occupation_numbers(0,1))
|
QEF/postqe
|
tests/test_ase.py
|
Python
|
lgpl-2.1
| 1,643
|
[
"ASE",
"Quantum ESPRESSO"
] |
ae3b9cf63b2668ca4979ec203ec4b8880cb0bfb35306a4cff11d0ebb295a82f7
|
import logging
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.decorators import validate
from pylons.decorators.rest import dispatch_on
from formencode import validators, htmlfill, ForEach, Invalid
from formencode.variabledecode import NestedVariables
from zkpylons.lib.base import BaseController, render
from zkpylons.lib.validators import BaseSchema, NotExistingPersonValidator, ExistingPersonValidator, PersonSchema, IAgreeValidator, CountryValidator
import zkpylons.lib.helpers as h
from zkpylons.lib.helpers import check_for_incomplete_profile
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import ValidAuthKitUser
from zkpylons.lib.mail import email
from zkpylons.model import meta
from zkpylons.model import Person, PasswordResetConfirmation, Role
from zkpylons.model import ProposalStatus
from zkpylons.model import SocialNetwork
from zkpylons.model import Travel
from zkpylons.config.lca_info import lca_info, lca_rego
from zkpylons.lib.ssl_requirement import enforce_ssl
import datetime
import json
import urllib
import urllib2
log = logging.getLogger(__name__)
class ForgottenPasswordSchema(BaseSchema):
email_address = validators.Email(not_empty=True)
class PasswordResetSchema(BaseSchema):
password = validators.String(not_empty=True)
password_confirm = validators.String(not_empty=True)
chained_validators = [validators.FieldsMatch('password', 'password_confirm')]
class _SocialNetworkSchema(BaseSchema):
name = validators.String()
account_name = validators.String()
class IncompletePersonSchema(BaseSchema):
email_address = validators.Email(not_empty=True)
chained_validators = [NotExistingPersonValidator()]
class NewIncompletePersonSchema(BaseSchema):
pre_validators = [NestedVariables]
person = IncompletePersonSchema()
class NewPersonSchema(BaseSchema):
pre_validators = [NestedVariables]
person = PersonSchema()
social_network = ForEach(_SocialNetworkSchema())
class _UpdatePersonSchema(BaseSchema):
firstname = validators.String(not_empty=True)
lastname = validators.String(not_empty=True)
email_address = validators.Email(not_empty=True)
company = validators.String()
phone = validators.String()
# mobile = validators.String()
address1 = validators.String(not_empty=True)
address2 = validators.String()
city = validators.String(not_empty=True)
state = validators.String()
postcode = validators.String(not_empty=True)
country = CountryValidator(not_empty=True)
i_agree = validators.Bool(if_missing=False)
chained_validators = [IAgreeValidator("i_agree")]
class UpdatePersonSchema(BaseSchema):
person = _UpdatePersonSchema()
social_network = ForEach(_SocialNetworkSchema())
pre_validators = [NestedVariables]
class AuthPersonValidator(validators.FancyValidator):
def validate_python(self, values, state):
c.email = values['email_address']
c.person = Person.find_by_email(c.email)
error_message = None
if c.person is None or not c.person.check_password(values['password']):
error_message = ("Your sign-in details are incorrect; try the"
" 'Forgotten your password' link below or sign up"
" for a new person.")
message = "Login failed"
error_dict = {'email_address_error': error_message}
raise Invalid(message, values, state, error_dict=error_dict)
class PersonaValidator(validators.FancyValidator):
def validate_python(self, values, state):
assertion = values['assertion']
audience = h.url_for(qualified=True, controller='home').strip("/")
page = urllib2.urlopen('https://verifier.login.persona.org/verify',
urllib.urlencode({ "assertion": assertion,
"audience": audience}))
data = json.load(page)
if data['status'] == 'okay':
c.email = data['email']
c.person = Person.find_by_email(c.email)
if c.person is None:
if not lca_info['account_creation']:
error_message = "Your sign-in details are incorrect; try the 'Forgotten your password' link below."
message = "Login failed"
error_dict = {'email_address_error': error_message}
raise Invalid(message, values, state, error_dict=error_dict)
# Create a new account for this email address
c.person = Person()
c.person.email_address = data['email']
c.person.activated = True
meta.Session.add(c.person)
meta.Session.commit()
if not c.person.activated:
# Persona returns verified emails only, so might as well confirm this one...
c.person.activated = True
meta.Session.commit()
class LoginPersonSchema(BaseSchema):
email_address = validators.Email(not_empty=True)
password = validators.String(not_empty=True)
chained_validators = [AuthPersonValidator()]
class LoginSchema(BaseSchema):
person = LoginPersonSchema()
pre_validators = [NestedVariables]
class PersonaLoginSchema(BaseSchema):
assertion = validators.String(not_empty=True)
chained_validators = [PersonaValidator()]
class RoleSchema(BaseSchema):
role = validators.String(not_empty=True)
action = validators.OneOf(['Grant', 'Revoke'])
class TravelSchema(BaseSchema):
origin_airport = validators.String(not_empty=True)
destination_airport = validators.String(not_empty=True)
pre_validators = [NestedVariables]
class OfferSchema(BaseSchema):
status = validators.OneOf(['accept', 'withdraw', 'contact'])
travel = TravelSchema(if_missing=None)
pre_validators = [NestedVariables]
class PersonController(BaseController): #Read, Update, List
@enforce_ssl(required_all=True)
def __before__(self, **kwargs):
pass
@dispatch_on(POST="_signin")
def signin(self):
role_error = session.pop('role_error', None)
if role_error:
h.flash(role_error)
elif h.signed_in_person():
h.flash("You're already logged in")
redirect_to('home')
return render('/person/signin.mako')
def finish_login(self, email):
# Tell authkit we authenticated them
request.environ['paste.auth_tkt.set_user'](email)
h.check_for_incomplete_profile(c.person)
h.flash('You have signed in')
self._redirect_user_optimally()
def _redirect_user_optimally(self):
redirect_location = session.get('redirect_to', None)
if redirect_location:
del session['redirect_to']
session.save()
redirect_to(str(redirect_location))
if lca_info['conference_status'] == 'open':
redirect_to(controller='registration', action='new')
elif lca_info['cfp_status'] == 'open':
redirect_to(controller='proposal')
redirect_to('home')
@validate(schema=LoginSchema(), form='signin', post_only=True, on_get=False, variable_decode=True)
def _signin(self):
self.finish_login(c.email)
@validate(schema=PersonaLoginSchema(), form='persona_login', post_only=True, on_get=False, variable_decode=True)
def persona_login(self):
self.finish_login(c.email)
def signout_confirm(self, id=None):
""" Confirm user wants to sign out
"""
if id is not None:
redirect_to(action='signout_confirm', id=None)
return render('/person/signout.mako')
def signout(self):
""" Sign the user out
        AuthKit actually does the work after this finishes.
"""
# return home
h.flash('You have signed out')
redirect_to('home')
def activate(self):
c.person = h.signed_in_person()
if c.person.activated:
# We've since activated, lets go back to where we were
self._redirect_user_optimally()
return render('/person/activate.mako')
def confirm(self, confirm_hash):
"""Confirm a registration with the given ID.
        `confirm_hash` is an md5 hash of the email address of the registrant,
        the time they registered, and a nonce.
"""
person = Person.find_by_url_hash(confirm_hash)
if person.activated:
return render('person/already_confirmed.mako')
person.activated = True
meta.Session.commit()
return render('person/confirmed.mako')
@dispatch_on(POST="_forgotten_password")
def forgotten_password(self):
return render('/person/forgotten_password.mako')
@validate(schema=ForgottenPasswordSchema(), form='forgotten_password', post_only=True, on_get=True, variable_decode=True)
def _forgotten_password(self):
"""Action to let the user request a password change.
GET returns a form for emailing them the password change
confirmation.
POST checks the form and then creates a confirmation record:
date, email_address, and a url_hash that is a hash of a
combination of date, email_address, and a random nonce.
The email address must exist in the person database.
The second half of the password change operation happens in
the ``confirm`` action.
"""
c.email = self.form_result['email_address']
c.person = Person.find_by_email(c.email)
if c.person is not None:
# Check if there is already a password recovery in progress
reset = PasswordResetConfirmation.find_by_email(c.email)
if reset is not None:
return render('person/in_progress.mako')
# Ok kick one off
c.conf_rec = PasswordResetConfirmation(email_address=c.email)
meta.Session.add(c.conf_rec)
meta.Session.commit()
email(c.email, render('person/confirmation_email.mako'))
return render('person/password_confirmation_sent.mako')
@dispatch_on(POST="_reset_password")
def reset_password(self, url_hash):
c.conf_rec = PasswordResetConfirmation.find_by_url_hash(url_hash)
return render('person/reset.mako')
@validate(schema=PasswordResetSchema(), form='reset_password', post_only=True, on_get=True, variable_decode=True)
def _reset_password(self, url_hash):
"""Confirm a password change request, and let the user change
their password.
`url_hash` is a hash of the email address, with which we can
        look up the confirmation record in the database.
If `url_hash` doesn't exist, 404.
If `url_hash` exists and the date is older than 24 hours,
warn the user, offer to send a new confirmation, and delete the
confirmation record.
GET returns a form for setting their password, with their email
address already shown.
POST checks that the email address (in the session, not in the
form) is part of a valid person record (again). If the record
exists, then update the password, hashed. Report success to the
user. Delete the confirmation record.
If the record doesn't exist, throw an error, delete the
confirmation record.
"""
c.conf_rec = PasswordResetConfirmation.find_by_url_hash(url_hash)
now = datetime.datetime.now(c.conf_rec.timestamp.tzinfo)
delta = now - c.conf_rec.timestamp
if delta > datetime.timedelta(hours=24):
# this confirmation record has expired
meta.Session.delete(c.conf_rec)
meta.Session.commit()
return render('person/expired.mako')
c.person = Person.find_by_email(c.conf_rec.email_address)
if c.person is None:
raise RuntimeError, "Person doesn't exist %s" % c.conf_rec.email_address
# set the password
c.person.password = self.form_result['password']
# also make sure the person is activated
c.person.activated = True
# delete the conf rec
meta.Session.delete(c.conf_rec)
meta.Session.commit()
h.flash('Your password has been updated!')
self.finish_login(c.person.email_address)
@authorize(h.auth.is_valid_user)
@dispatch_on(POST="_finish_signup")
def finish_signup(self):
c.form = 'finish_signup'
c.person = h.signed_in_person()
c.social_networks = SocialNetwork.find_all()
c.person.fetch_social_networks()
defaults = h.object_to_defaults(c.person, 'person')
if not defaults['person.country']:
defaults['person.country'] = 'AUSTRALIA'
form = render('/person/finish_signup.mako')
return htmlfill.render(form, defaults)
@authorize(h.auth.is_valid_user)
@validate(schema=UpdatePersonSchema(), form='finish_signup', post_only=True, on_get=True, variable_decode=True)
def _finish_signup(self):
c.person = h.signed_in_person()
self.finish_edit(c.person)
redirect_location = session.pop('redirect_to', None)
if redirect_location:
redirect_to(str(redirect_location))
else:
redirect_to('home')
def finish_edit(self, person):
for key in self.form_result['person']:
setattr(person, key, self.form_result['person'][key])
for sn in self.form_result['social_network']:
network = SocialNetwork.find_by_name(sn['name'])
if sn['account_name']:
person.social_networks[network] = sn['account_name']
elif network in person.social_networks:
del person.social_networks[network]
# update the objects with the validated form data
meta.Session.commit()
@authorize(h.auth.is_valid_user)
@dispatch_on(POST="_edit")
def edit(self, id):
# We need to recheck auth in here so we can pass in the id
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_user(id), h.auth.has_organiser_role)):
# Raise a no_auth error
h.auth.no_role()
c.form = 'edit'
c.person = Person.find_by_id(id)
c.social_networks = SocialNetwork.find_all()
c.person.fetch_social_networks()
defaults = h.object_to_defaults(c.person, 'person')
if not defaults['person.country']:
defaults['person.country'] = 'AUSTRALIA'
form = render('/person/edit.mako')
return htmlfill.render(form, defaults)
@authorize(h.auth.is_valid_user)
@validate(schema=UpdatePersonSchema(), form='edit', post_only=True, on_get=True, variable_decode=True)
def _edit(self, id):
"""UPDATE PERSON"""
# We need to recheck auth in here so we can pass in the id
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_user(id), h.auth.has_organiser_role)):
# Raise a no_auth error
h.auth.no_role()
c.person = Person.find_by_id(id)
self.finish_edit(c.person)
redirect_to(action='view', id=id)
@authorize(h.auth.is_valid_user)
def reprint(self, id):
c.person = Person.find_by_id(id)
c.person.badge_printed = False
meta.Session.commit()
redirect_to(action='view', id=id)
@dispatch_on(POST="_new")
def new(self):
# Do we allow account creation?
if lca_info['account_creation']:
"""Create a new person form.
"""
if h.signed_in_person():
h.flash("You're already logged in")
redirect_to('home')
defaults = {
'person.country': 'AUSTRALIA',
}
if h.lca_rego['personal_info']['home_address'] == 'no':
defaults['person.address1'] = 'not available'
defaults['person.city'] = 'not available'
defaults['person.postcode'] = 'not available'
c.social_networks = SocialNetwork.find_all()
form = render('/person/new.mako')
return htmlfill.render(form, defaults)
else:
return render('/not_allowed.mako')
@validate(schema=NewPersonSchema(), form='new', post_only=True, on_get=True, variable_decode=True)
def _new(self):
# Do we allow account creation?
if lca_info['account_creation']:
"""Create a new person submit.
"""
# Remove fields not in class
results = self.form_result['person']
del results['password_confirm']
c.person = Person(**results)
c.person.email_address = c.person.email_address.lower()
meta.Session.add(c.person)
#for sn in self.form_result['social_network']:
# network = SocialNetwork.find_by_name(sn['name'])
# if sn['account_name']:
# c.person.social_networks[network] = sn['account_name']
meta.Session.commit()
if lca_rego['confirm_email_address'] == 'no':
redirect_to(controller='person', action='confirm', confirm_hash=c.person.url_hash)
else:
email(c.person.email_address, render('/person/new_person_email.mako'))
# return render('/person/thankyou.mako')
return self.finish_login(c.person.email_address)
else:
return render('/not_allowed.mako')
@authorize(h.auth.has_organiser_role)
@dispatch_on(POST="_new_incomplete")
def new_incomplete(self):
return render('/person/new_incomplete.mako')
@validate(schema=NewIncompletePersonSchema(), form='new_incomplete', post_only=True, on_get=True, variable_decode=True)
def _new_incomplete(self):
results = self.form_result['person']
c.person = Person(**results)
c.person.email_address = c.person.email_address.lower()
meta.Session.add(c.person)
meta.Session.commit()
redirect_to(controller='person', action='index')
@authorize(h.auth.has_organiser_role)
def index(self):
c.person_collection = Person.find_all()
return render('/person/list.mako')
@authorize(h.auth.is_valid_user)
def view(self, id):
# We need to recheck auth in here so we can pass in the id
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_user(id), h.auth.has_reviewer_role, h.auth.has_organiser_role)):
# Raise a no_auth error
h.auth.no_role()
c.registration_status = h.config['app_conf'].get('registration_status')
c.person = Person.find_by_id(id)
return render('person/view.mako')
@dispatch_on(POST="_roles")
@authorize(h.auth.has_organiser_role)
def roles(self, id):
c.person = Person.find_by_id(id)
c.roles = Role.find_all()
if not c.person.activated:
h.flash(
"NOTICE: This user hasn't confirmed their email address yet."
" Please get them to visit"
" %s" % h.full_url_for('person/activate'),
category='warning')
return render('person/roles.mako')
@authorize(h.auth.has_organiser_role)
@validate(schema=RoleSchema, form='roles', post_only=True, on_get=True, variable_decode=True)
def _roles(self, id):
""" Lists and changes the person's roles. """
c.person = Person.find_by_id(id)
c.roles = Role.find_all()
role = self.form_result['role']
action = self.form_result['action']
role = Role.find_by_name(name=role)
if action == 'Revoke' and role in c.person.roles:
c.person.roles.remove(role)
h.flash('Role ' + role.name + ' Revoked')
elif action == 'Grant' and role not in c.person.roles:
c.person.roles.append(role)
h.flash('Role ' + role.name + ' Granted')
else:
h.flash("Nothing to do")
meta.Session.commit()
return render('person/roles.mako')
@dispatch_on(POST="_offer")
@authorize(h.auth.is_valid_user)
def offer(self, id):
# We need to recheck auth in here so we can pass in the id
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_user(id), h.auth.has_reviewer_role, h.auth.has_organiser_role)):
# Raise a no_auth error
h.auth.no_role()
c.person = Person.find_by_id(id)
c.offers = c.person.proposal_offers
c.travel_assistance = reduce(lambda a, b: a or ('Travel' in b.status.name), c.offers, False) or False
c.accommodation_assistance = reduce(lambda a, b: a or ('Accommodation' in b.status.name), c.offers, False) or False
# Set initial form defaults
defaults = {
'status': 'accept',
}
if c.person.travel:
defaults.update(h.object_to_defaults(c.person.travel, 'travel'))
form = render('person/offer.mako')
return htmlfill.render(form, defaults)
@authorize(h.auth.is_valid_user)
@validate(schema=OfferSchema, form='offer', post_only=True, on_get=True, variable_decode=True)
def _offer(self,id):
# We need to recheck auth in here so we can pass in the id
if not h.auth.authorized(h.auth.Or(h.auth.is_same_zkpylons_user(id), h.auth.has_reviewer_role, h.auth.has_organiser_role)):
# Raise a no_auth error
h.auth.no_role()
c.person = Person.find_by_id(id)
c.offers = c.person.proposal_offers
c.travel_assistance = reduce(lambda a, b: a or ('Travel' in b.status.name), c.offers, False) or False
c.accommodation_assistance = reduce(lambda a, b: a or ('Accommodation' in b.status.name), c.offers, False) or False
# What status are we moving all proposals to?
if self.form_result['status'] == 'accept':
c.status = ProposalStatus.find_by_name('Accepted')
elif self.form_result['status'] == 'withdraw':
c.status = ProposalStatus.find_by_name('Withdrawn')
elif self.form_result['status'] == 'contact':
c.status = ProposalStatus.find_by_name('Contact')
else:
c.status = None
emails = [c.person.email_address]
for offer in c.offers:
offer.status = c.status
if offer.type.notify_email and offer.type.notify_email not in emails:
emails.append(offer.type.notify_email)
if c.travel_assistance:
if not c.person.travel:
self.form_result['travel']['flight_details'] = ''
travel = Travel(**self.form_result['travel'])
meta.Session.add(travel)
c.person.travel = travel
else:
for key in self.form_result['travel']:
setattr(c.person.travel, key, self.form_result['travel'][key])
if c.status.name == 'Accepted':
email(c.person.email_address, render('/person/offer_email.mako'))
else:
email(emails, render('/person/offer_email.mako'))
# update the objects with the validated form data
meta.Session.commit()
return render('person/offer.mako')
|
noisymime/zookeepr
|
zkpylons/controllers/person.py
|
Python
|
gpl-2.0
| 23,362
|
[
"VisIt"
] |
5a591e4efdea17db58bf49b2b0cec0eaa8897cb548d62f41abf24cdee070e883
|
"""
Simple focus detection methods for use with holograms.
Change log:
2016/01/24 -- module started; nloomis@gmail.com
"""
__authors__ = ('nloomis@gmail.com',)
import digitalholography as dhi
import imageutils
import numpy
import scipy.ndimage
try:
from skimage import filters
except ImportError:
# handles the case where skimage is v0.10 (the filter module was renamed
# to filters in v0.11)
from skimage import filter as filters
from skimage.morphology import disk
#scipy.ndimage.filters.convolve
def SobelMetric(field, unused_opts):
    # Sobel uses [1, 2, 1; 0, 0, 0; -1, -2, -1]
    #return filters.sobel(numpy.abs(field))
    return scipy.ndimage.sobel(numpy.abs(field))
def PrewittMetric(field, unused_opts):
    # Prewitt uses [1, 1, 1; 0, 0, 0; -1, -1, -1]
    #return filters.prewitt(numpy.abs(field))
    return scipy.ndimage.prewitt(numpy.abs(field))
def ScharrMetric(field, unused_opts):
    # Scharr uses [3, 10, 3; 0, 0, 0; -3, -10, -3]
    return filters.scharr(numpy.abs(field))
def GaussianGradientMetric(field, opts):
    # TODO: use the options
    sigma = 2.0
    return scipy.ndimage.gaussian_gradient_magnitude(numpy.abs(field), sigma)
def GaussianLaplaceMetric(field, opts):
    """Laplace filter using Gaussian second derivatives."""
    sigma = 2.0
    return scipy.ndimage.gaussian_laplace(numpy.abs(field), sigma)
def LaplaceMetric(field, opts):
    #TODO: use the options?
    #return filters.laplace(numpy.abs(field))
    return scipy.ndimage.laplace(numpy.abs(field))
def RobertsMetric(field, unused_opts):
    return filters.roberts(numpy.abs(field))
def EntropyMetric(field, opts):
    #TODO: use the options?
    return filters.rank.entropy(numpy.abs(field), disk(5))
def RangeMetric(field, opts):
    """Local range within the structuring element."""
    #TODO: use the options
    return filters.rank.gradient(numpy.abs(field), disk(5))
def SteerableDerivativeMetric(field, opts):
    steerable_filter = imageutils.steerable_deriv(sigma=1.5)
    S, _, _, _ = imageutils.apply_gradient_filter(numpy.abs(field),
                                                  steerable_filter)
    return S
def FocusStack(holo, z_position_list, focus_function, options=None):
    """Builds a stack of focus data through a hologram volume.
    The hologram is reconstructed at each position in the z_position_list, and
    the pixel value at the depth which maximizes the focus_function is
    retained. The focus function should accept a complex-valued reconstruction
    field and should return a scalar for each pixel that indicates the degree
    of focus at that location."""
    max_focus_value = numpy.zeros(holo.data.shape)
    # complex dtype so the best-focus field value can be stored per pixel
    max_focus_pixel = numpy.zeros(holo.data.shape, dtype=complex)
    for z in z_position_list:
        field = holo.reconstruct(z)
        this_focus = focus_function(field, options)
        max_focus_value = numpy.maximum(max_focus_value, this_focus)
        is_at_max = max_focus_value == this_focus
        max_focus_pixel[is_at_max] = field[is_at_max]
    return max_focus_value, max_focus_pixel
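# A minimal usage sketch (not in the original file): it assumes the
# digitalholography module provides a hologram object with a `data` array and
# a reconstruct(z) method, as FocusStack requires; the constructor and depth
# values below are illustrative placeholders only.
#
#   holo = dhi.Hologram('sample_hologram.png')
#   z_list = numpy.linspace(10e-3, 100e-3, 50)
#   focus_map, best_field = FocusStack(holo, z_list, SobelMetric)
#   # focus_map: per-pixel maximum of the focus metric over depth
#   # best_field: complex field value at each pixel's best-focus depth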
|
nickloomis/loomsci-examples
|
python/holofocus.py
|
Python
|
mit
| 3,035
|
[
"Gaussian"
] |
b73581ff5b2af4d879da02a1bcf4e72dee3fa9644e4778182e5a82938e9abbdd
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffycontam(RPackage):
"""structured corruption of cel file data to demonstrate QA
effectiveness."""
homepage = "https://www.bioconductor.org/packages/affyContam/"
url = "https://git.bioconductor.org/packages/affyContam"
version('1.34.0', git='https://git.bioconductor.org/packages/affyContam', commit='03529f26d059c19e069cdda358dbf7789b6d4c40')
depends_on('r@3.4.0:3.4.9', when=('@1.34.0'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-affydata', type=('build', 'run'))
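# Usage note (not part of the original recipe): once this file sits in a Spack
# package repository, the library can typically be installed with
# `spack install r-affycontam`; exact results depend on the local Spack setup.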
|
skosukhin/spack
|
var/spack/repos/builtin/packages/r-affycontam/package.py
|
Python
|
lgpl-2.1
| 1,838
|
[
"Bioconductor"
] |
0569942aef16cbc54a668bb1d6ad881cad93c461564c8d0b08ccfa20c4e95d20
|
# Copyright 2004, Magnus Hagdorn
#
# This file is part of GLIMMER.
#
# GLIMMER is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GLIMMER is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GLIMMER; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Creating CF files."""
__all__=['CFVariableDef','CFcreatefile']
import numpy, Scientific.IO.NetCDF, ConfigParser, os, re, string, glob
from CF_file import *
NOATTRIB = ['name','dimensions','data','factor','load','f90file','hot','type','dimlen']
class CFVariableDef(dict):
"""Dictionary containing variable definitions."""
def __init__(self,filename):
"""Initialise Variable class.
filename: name or list of names of file(s) containing variable definitions."""
dict.__init__(self)
# reading variable configuration file
vars = ConfigParser.ConfigParser()
vars.read(filename)
for v in vars.sections():
vardef = {}
vardef['name'] = v
for (name, value) in vars.items(v):
vardef[name] = value
self.__setitem__(v,vardef)
def keys(self):
"""Reorder standard keys alphabetically."""
dk = []
vk = []
for v in dict.keys(self):
if is_dimvar(self.__getitem__(v)):
dk.append(v)
else:
vk.append(v)
dk.sort()
vk.sort()
return dk+vk
class CFcreatefile(CFfile):
"""Creating a CF netCDF file."""
def __init__(self,fname,append=False):
"""Initialise.
fname: name of CF file.
append: set to true if file should be open rw"""
CFfile.__init__(self,fname)
self.mapvarname = 'mapping'
# get variable definitions
try:
vname=os.environ['GLIMMER_PREFIX']
except KeyError:
vname = os.path.expanduser(os.path.join('~','glimmer'))
vname = os.path.join(vname,'share','glimmer')
if not os.path.exists(vname):
            raise RuntimeError, 'Cannot find ncdf_vars.def\nPlease set GLIMMER_PREFIX to where glimmer is installed'
self.vars = CFVariableDef(glob.glob(vname+'/*.def'))
if append:
self.file = Scientific.IO.NetCDF.NetCDFFile(self.fname,'a')
else:
self.file = Scientific.IO.NetCDF.NetCDFFile(self.fname,'w')
self.file.Conventions = "CF-1.0"
def createDimension(self,name, length):
"""Create a dimension.
Creates a new dimension with the given name and length.
length must be a positive integer or None, which stands for
the unlimited dimension. Note that there can be only one
unlimited dimension in a file."""
self.file.createDimension(name,length)
def createVariable(self,name):
"""Create a CF variable.
name: name of variable."""
if name not in self.vars:
raise KeyError, 'Cannot find definition for variable %s'%name
v = self.vars[name]
var = self.file.createVariable(name,'f',tuple(string.replace(v['dimensions'],' ','').split(',')))
for a in v:
if a not in NOATTRIB:
setattr(var,a,v[a])
if self.mapvarname != '' and 'x' in v['dimensions'] and 'y' in v['dimensions']:
var.grid_mapping = self.mapvarname
return var
if __name__ == '__main__':
# creating a test netCDF file
import CF_proj
filename="test.nc"
numx=100
numy=150
proj = CF_proj.DummyProj()
proj.grid_mapping_name='albers_conical_equal_area'
proj.false_easting = [1903971.]
proj.false_northing = [898179.3]
proj.longitude_of_central_meridian = [33.5]
proj.latitude_of_projection_origin = [60.5]
proj.standard_parallel = [52.83333, 68.16666]
cffile = CFcreatefile(filename)
cffile.title = "Test CF file"
cffile.institution = "University of Edinburgh"
cffile.source = "None"
cffile.comment = "Testing if our netCDF files conform with CF standard"
# creating dimensions
cffile.createDimension('x0',numx-1)
cffile.createDimension('x1',numx)
cffile.createDimension('y0',numy-1)
cffile.createDimension('y1',numy)
cffile.createDimension('level',1)
cffile.createDimension('time',None)
cffile.projection=proj
# creating variables
var=cffile.createVariable('x0')
var[:]=numpy.arange(numx-1).astype('f')
var=cffile.createVariable('x1')
var[:]=numpy.arange(numx).astype('f')
var=cffile.createVariable('y0')
var[:]=numpy.arange(numy-1).astype('f')
var=cffile.createVariable('y1')
var[:]=numpy.arange(numy).astype('f')
for v in cffile.vars:
if 'spot' not in v and v not in ['VARSET','level','x0','y0','x1','y1']:
var = cffile.createVariable(v)
cffile.close()
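# Illustrative sketch (not from the original file): CFVariableDef reads
# INI-style sections via ConfigParser, so a definition in a *.def file looks
# roughly like the block below. The section name becomes the variable name,
# 'dimensions' gives the comma-separated dimension list, and every other
# option is attached to the netCDF variable as an attribute. The names and
# values here are examples only.
#
#   [topg]
#   dimensions: time, y1, x1
#   units: meter
#   long_name: bedrock topography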
|
glimmer-cism/PyCF
|
PyCF/CF_createfile.py
|
Python
|
gpl-2.0
| 5,333
|
[
"NetCDF"
] |
662b2b011b5132b248ca672ddc3d73dd905770b9ee72902843eb1cf12747745c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.composition import Composition
from pymatgen.core.operations import SymmOp
from pymatgen.core.structure import IStructure, Structure, IMolecule, \
StructureError, Molecule
from pymatgen.core.lattice import Lattice
from pymatgen.electronic_structure.core import Magmom
import random
import os
import numpy as np
class IStructureTest(PymatgenTest):
def setUp(self):
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
self.lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.struct = IStructure(self.lattice, ["Si"] * 2, coords)
self.assertEqual(len(self.struct), 2,
"Wrong number of sites in structure!")
self.assertTrue(self.struct.is_ordered)
self.assertTrue(self.struct.ntypesp == 1)
coords = list()
coords.append([0, 0, 0])
coords.append([0., 0, 0.0000001])
self.assertRaises(StructureError, IStructure, self.lattice,
["Si"] * 2, coords, True)
self.propertied_structure = IStructure(
self.lattice, ["Si"] * 2, coords,
site_properties={'magmom': [5, -5]})
def test_matches(self):
ss = self.struct * 2
self.assertTrue(ss.matches(self.struct))
def test_bad_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.5, 0.75])
self.assertRaises(StructureError, IStructure, self.lattice,
["Si"] * 3, coords, validate_proximity=True)
# these shouldn't raise an error
IStructure(self.lattice, ["Si"] * 2, coords[:2], True)
IStructure(self.lattice, ["Si"], coords[:1], True)
def test_volume_and_density(self):
self.assertAlmostEqual(self.struct.volume, 40.04, 2, "Volume wrong!")
self.assertAlmostEqual(self.struct.density, 2.33, 2,
"Incorrect density")
def test_specie_init(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{Specie('O', -2): 1.0},
{Specie('Mg', 2): 0.8}], coords)
self.assertEqual(s.composition.formula, 'Mg0.8 O1')
def test_get_sorted_structure(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, ["O", "Li"], coords,
site_properties={'charge': [-2, 1]})
sorted_s = s.get_sorted_structure()
self.assertEqual(sorted_s[0].species_and_occu, Composition("Li"))
self.assertEqual(sorted_s[1].species_and_occu, Composition("O"))
self.assertEqual(sorted_s[0].charge, 1)
self.assertEqual(sorted_s[1].charge, -2)
s = IStructure(self.lattice, ["Se", "C", "Se", "C"],
[[0] * 3, [0.5] * 3, [0.25] * 3, [0.75] * 3])
self.assertEqual([site.specie.symbol
for site in s.get_sorted_structure()],
["C", "C", "Se", "Se"])
def test_get_space_group_data(self):
self.assertEqual(self.struct.get_space_group_info(), ('Fd-3m', 227))
def test_fractional_occupations(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{'O': 1.0}, {'Mg': 0.8}],
coords)
self.assertEqual(s.composition.formula, 'Mg0.8 O1')
self.assertFalse(s.is_ordered)
def test_get_distance(self):
self.assertAlmostEqual(self.struct.get_distance(0, 1), 2.35, 2,
"Distance calculated wrongly!")
pt = [0.9, 0.9, 0.8]
self.assertAlmostEqual(self.struct[0].distance_from_point(pt),
1.50332963784, 2,
"Distance calculated wrongly!")
def test_as_dict(self):
si = Specie("Si", 4)
mn = Element("Mn")
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, [{si: 0.5, mn: 0.5}, {si: 0.5}],
coords)
self.assertIn("lattice", struct.as_dict())
self.assertIn("sites", struct.as_dict())
d = self.propertied_structure.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 5)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
s = IStructure(self.lattice, [{Specie('O', -2,
properties={"spin": 3}): 1.0},
{Specie('Mg', 2,
properties={"spin": 2}): 0.8}],
coords, site_properties={'magmom': [5, -5]})
d = s.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 5)
self.assertEqual(d['sites'][0]['species'][0]['properties']['spin'], 3)
d = s.as_dict(0)
self.assertNotIn("volume", d['lattice'])
self.assertNotIn("xyz", d['sites'][0])
def test_from_dict(self):
d = self.propertied_structure.as_dict()
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
d = self.propertied_structure.as_dict(0)
s2 = IStructure.from_dict(d)
self.assertEqual(s, s2)
d = {'lattice': {'a': 3.8401979337, 'volume': 40.044794644251596,
'c': 3.8401979337177736, 'b': 3.840198994344244,
'matrix': [[3.8401979337, 0.0, 0.0],
[1.9200989668, 3.3257101909, 0.0],
[0.0, -2.2171384943, 3.1355090603]],
'alpha': 119.9999908639842, 'beta': 90.0,
'gamma': 60.000009137322195},
'sites': [{'properties': {'magmom': 5}, 'abc': [0.0, 0.0, 0.0],
'occu': 1.0, 'species': [{'occu': 1.0,
'oxidation_state': -2,
'properties': {'spin': 3},
'element': 'O'}],
'label': 'O2-', 'xyz': [0.0, 0.0, 0.0]},
{'properties': {'magmom': -5},
'abc': [0.75, 0.5, 0.75],
'occu': 0.8, 'species': [{'occu': 0.8,
'oxidation_state': 2,
'properties': {'spin': 2},
'element': 'Mg'}],
'label': 'Mg2+:0.800',
'xyz': [3.8401979336749994, 1.2247250003039056e-06,
2.351631795225]}]}
s = IStructure.from_dict(d)
self.assertEqual(s[0].magmom, 5)
self.assertEqual(s[0].specie.spin, 3)
self.assertEqual(type(s), IStructure)
def test_site_properties(self):
site_props = self.propertied_structure.site_properties
self.assertEqual(site_props['magmom'], [5, -5])
self.assertEqual(self.propertied_structure[0].magmom, 5)
self.assertEqual(self.propertied_structure[1].magmom, -5)
def test_copy(self):
new_struct = self.propertied_structure.copy(site_properties={'charge':
[2, 3]})
self.assertEqual(new_struct[0].magmom, 5)
self.assertEqual(new_struct[1].magmom, -5)
self.assertEqual(new_struct[0].charge, 2)
self.assertEqual(new_struct[1].charge, 3)
coords = list()
coords.append([0, 0, 0])
coords.append([0., 0, 0.0000001])
structure = IStructure(self.lattice, ["O", "Si"], coords,
site_properties={'magmom': [5, -5]})
new_struct = structure.copy(site_properties={'charge': [2, 3]},
sanitize=True)
self.assertEqual(new_struct[0].magmom, -5)
self.assertEqual(new_struct[1].magmom, 5)
self.assertEqual(new_struct[0].charge, 3)
self.assertEqual(new_struct[1].charge, 2)
self.assertAlmostEqual(new_struct.volume, structure.volume)
def test_interpolate(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 10)
for s in int_s:
self.assertIsNotNone(s, "Interpolation Failed!")
self.assertEqual(int_s[0].lattice, s.lattice)
self.assertArrayEqual(int_s[1][1].frac_coords, [0.725, 0.5, 0.725])
badlattice = [[1, 0.00, 0.00], [0, 1, 0.00], [0.00, 0, 1]]
struct2 = IStructure(badlattice, ["Si"] * 2, coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
struct2 = IStructure(self.struct.lattice, ["Si", "Fe"], coords2)
self.assertRaises(ValueError, struct.interpolate, struct2)
# Test autosort feature.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s1.pop(0)
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2.pop(2)
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[0].frac_coords, s[0].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
# Make sure autosort has no effect on simpler interpolations,
# and with shuffled sites.
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3),
["Fe"], [[0, 0, 0]])
s2[0] = "Fe", [0.01, 0.01, 0.01]
random.shuffle(s2)
for s in s1.interpolate(s2, autosort_tol=0.5):
self.assertArrayAlmostEqual(s1[1].frac_coords, s[1].frac_coords)
self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)
self.assertArrayAlmostEqual(s1[3].frac_coords, s[3].frac_coords)
def test_interpolate_lattice(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
struct = IStructure(self.lattice, ["Si"] * 2, coords)
coords2 = list()
coords2.append([0, 0, 0])
coords2.append([0.5, 0.5, 0.5])
l2 = Lattice.from_lengths_and_angles([3,4,4], [100,100,70])
struct2 = IStructure(l2, ["Si"] * 2, coords2)
int_s = struct.interpolate(struct2, 2, interpolate_lattices=True)
self.assertArrayAlmostEqual(struct.lattice.abc,
int_s[0].lattice.abc)
self.assertArrayAlmostEqual(struct.lattice.angles,
int_s[0].lattice.angles)
self.assertArrayAlmostEqual(struct2.lattice.abc,
int_s[2].lattice.abc)
self.assertArrayAlmostEqual(struct2.lattice.angles,
int_s[2].lattice.angles)
int_angles = [110.3976469, 94.5359731, 64.5165856]
self.assertArrayAlmostEqual(int_angles,
int_s[1].lattice.angles)
# Assert that volume is monotonic
self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)
self.assertTrue(int_s[1].lattice.volume >= struct.lattice.volume)
def test_interpolate_lattice_rotation(self):
l1 = Lattice([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
l2 = Lattice([[-1.01, 0, 0], [0, -1.01, 0], [0, 0, 1]])
coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
struct1 = IStructure(l1, ["Si"] * 2, coords)
struct2 = IStructure(l2, ["Si"] * 2, coords)
int_s = struct1.interpolate(struct2, 2, interpolate_lattices=True)
# Assert that volume is monotonic
self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)
self.assertTrue(int_s[1].lattice.volume >= struct1.lattice.volume)
def test_get_primitive_structure(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(fcc_ag.get_primitive_structure()), 1)
coords = [[0, 0, 0], [0.5, 0.5, 0.5]]
bcc_li = IStructure(Lattice.cubic(4.09), ["Li"] * 2, coords)
bcc_prim = bcc_li.get_primitive_structure()
self.assertEqual(len(bcc_prim), 1)
self.assertAlmostEqual(bcc_prim.lattice.alpha, 109.47122, 3)
coords = [[0] * 3, [0.5] * 3, [0.25] * 3, [0.26] * 3]
s = IStructure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
self.assertEqual(len(s.get_primitive_structure()), 4)
def test_primitive_cell_site_merging(self):
l = Lattice.cubic(10)
coords = [[0, 0, 0], [0, 0, 0.5],
[0, 0, 0.26], [0, 0, 0.74]]
sp = ['Ag', 'Ag', 'Be', 'Be']
s = Structure(l, sp, coords)
dm = s.get_primitive_structure().distance_matrix
self.assertArrayAlmostEqual(dm, [[0, 2.5], [2.5, 0]])
def test_primitive_on_large_supercell(self):
coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]
fcc_ag = Structure(Lattice.cubic(4.09), ["Ag"] * 4, coords)
fcc_ag.make_supercell([2, 2, 2])
fcc_ag_prim = fcc_ag.get_primitive_structure()
self.assertEqual(len(fcc_ag_prim), 1)
self.assertAlmostEqual(fcc_ag_prim.volume, 17.10448225)
def test_primitive_positions(self):
coords = [[0, 0, 0], [0.3, 0.35, 0.45]]
s = Structure(Lattice.from_parameters(1,2,3,50,66,88), ["Ag"] * 2, coords)
a = [[-1,2,-3], [3,2,-4], [1,0,-1]]
b = [[4, 0, 0], [1, 1, 0], [3, 0, 1]]
c = [[2, 0, 0], [1, 3, 0], [1, 1, 1]]
for sc_matrix in [c]:
sc = s.copy()
sc.make_supercell(sc_matrix)
prim = sc.get_primitive_structure(0.01)
self.assertEqual(len(prim), 2)
self.assertAlmostEqual(prim.distance_matrix[0,1], 1.0203432356739286)
def test_primitive_structure_volume_check(self):
l = Lattice.tetragonal(10, 30)
coords = [[0.5, 0.8, 0], [0.5, 0.2, 0],
[0.5, 0.8, 0.333], [0.5, 0.5, 0.333],
[0.5, 0.5, 0.666], [0.5, 0.2, 0.666]]
s = IStructure(l, ["Ag"] * 6, coords)
sprim = s.get_primitive_structure(tolerance=0.1)
self.assertEqual(len(sprim), 6)
def test_get_all_neighbors_and_get_neighbors(self):
s = self.struct
nn = s.get_neighbors_in_shell(s[0].frac_coords, 2, 4,
include_index=True)
self.assertEqual(len(nn), 47)
self.assertEqual(nn[0][-1], 0)
r = random.uniform(3, 6)
all_nn = s.get_all_neighbors(r, True)
for i in range(len(s)):
self.assertEqual(len(all_nn[i]), len(s.get_neighbors(s[i], r)))
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
s = Structure(Lattice.cubic(1), ['Li'], [[0,0,0]])
s.make_supercell([2,2,2])
self.assertEqual(sum(map(len, s.get_all_neighbors(3))), 976)
def test_get_all_neighbors_outside_cell(self):
s = Structure(Lattice.cubic(2), ['Li', 'Li', 'Li', 'Si'],
[[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3])
all_nn = s.get_all_neighbors(0.2, True)
for site, nns in zip(s, all_nn):
for nn in nns:
self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))
d = sum((site.coords - nn[0].coords) ** 2) ** 0.5
self.assertAlmostEqual(d, nn[1])
self.assertEqual(list(map(len, all_nn)), [2, 2, 2, 0])
def test_get_dist_matrix(self):
ans = [[0., 2.3516318],
[2.3516318, 0.]]
self.assertArrayAlmostEqual(self.struct.distance_matrix, ans)
def test_to_from_file_string(self):
for fmt in ["cif", "json", "poscar", "cssr"]:
s = self.struct.to(fmt=fmt)
self.assertIsNotNone(s)
ss = IStructure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(
ss.lattice.lengths_and_angles,
self.struct.lattice.lengths_and_angles, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords, self.struct.frac_coords)
self.assertIsInstance(ss, IStructure)
self.struct.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.struct.to(filename="Si_testing.yaml")
self.assertTrue(os.path.exists("Si_testing.yaml"))
s = Structure.from_file("Si_testing.yaml")
self.assertEqual(s, self.struct)
os.remove("Si_testing.yaml")
self.struct.to(filename="POSCAR.testing.gz")
s = Structure.from_file("POSCAR.testing.gz")
self.assertEqual(s, self.struct)
os.remove("POSCAR.testing.gz")
class StructureTest(PymatgenTest):
def setUp(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice([[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603]])
self.structure = Structure(lattice, ["Si", "Si"], coords)
def test_mutable_sequence_methods(self):
s = self.structure
s[0] = "Fe"
self.assertEqual(s.formula, "Fe1 Si1")
s[0] = "Fe", [0.5, 0.5, 0.5]
self.assertEqual(s.formula, "Fe1 Si1")
self.assertArrayAlmostEqual(s[0].frac_coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("Si"))
self.assertArrayAlmostEqual(s[0].frac_coords, [0.75, 0.5, 0.75])
s[0] = {"Mn": 0.5}
self.assertEqual(s.formula, "Mn0.5 Fe1")
del s[1]
self.assertEqual(s.formula, "Mn0.5")
s[0] = "Fe", [0.9, 0.9, 0.9], {"magmom": 5}
self.assertEqual(s.formula, "Fe1")
self.assertEqual(s[0].magmom, 5)
# Test atomic replacement.
s["Fe"] = "Mn"
self.assertEqual(s.formula, "Mn1")
# Test slice replacement.
s = PymatgenTest.get_structure("Li2O")
s[1:3] = "S"
self.assertEqual(s.formula, "Li1 S2")
def test_non_hash(self):
self.assertRaises(TypeError, dict, [(self.structure, 1)])
def test_sort(self):
s = self.structure
s[0] = "F"
s.sort()
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
s.sort(key=lambda site: site.species_string)
self.assertEqual(s[0].species_string, "F")
self.assertEqual(s[1].species_string, "Si")
s.sort(key=lambda site: site.species_string, reverse=True)
self.assertEqual(s[0].species_string, "Si")
self.assertEqual(s[1].species_string, "F")
def test_append_insert_remove_replace(self):
s = self.structure
s.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "Si2 O1")
self.assertTrue(s.ntypesp == 2)
self.assertTrue(s.symbol_set == ("Si", "O"))
self.assertTrue(s.indices_from_symbol("Si") == (0,2))
self.assertTrue(s.indices_from_symbol("O") == (1,))
del s[2]
self.assertEqual(s.formula, "Si1 O1")
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
s.append("N", [0.25, 0.25, 0.25])
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
self.assertTrue(s.symbol_set == ("Si", "O", "N"))
self.assertTrue(s.indices_from_symbol("Si") == (0,))
self.assertTrue(s.indices_from_symbol("O") == (1,))
self.assertTrue(s.indices_from_symbol("N") == (2,))
s[0] = "Ge"
self.assertEqual(s.formula, "Ge1 N1 O1")
self.assertTrue(s.symbol_set == ("Ge", "O", "N"))
s.replace_species({"Ge": "Si"})
self.assertEqual(s.formula, "Si1 N1 O1")
self.assertTrue(s.ntypesp == 3)
s.replace_species({"Si": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.5 Ge0.5 N1 O1")
#this should change the .5Si .5Ge sites to .75Si .25Ge
s.replace_species({"Ge": {"Ge": 0.5, "Si": 0.5}})
self.assertEqual(s.formula, "Si0.75 Ge0.25 N1 O1")
# In this case, s.ntypesp is ambiguous.
# for the time being, we raise AttributeError.
with self.assertRaises(AttributeError):
s.ntypesp
s.remove_species(["Si"])
self.assertEqual(s.formula, "Ge0.25 N1 O1")
s.remove_sites([1, 2])
self.assertEqual(s.formula, "Ge0.25")
def test_add_site_property(self):
s = self.structure
s.add_site_property("charge", [4.1, -5])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[1].charge, -5)
s.add_site_property("magmom", [3, 2])
self.assertEqual(s[0].charge, 4.1)
self.assertEqual(s[0].magmom, 3)
def test_propertied_structure(self):
#Make sure that site properties are set to None for missing values.
s = self.structure
s.add_site_property("charge", [4.1, -5])
s.append("Li", [0.3, 0.3 ,0.3])
self.assertEqual(len(s.site_properties["charge"]), 3)
def test_perturb(self):
d = 0.1
pre_perturbation_sites = self.structure.sites[:]
self.structure.perturb(distance=d)
post_perturbation_sites = self.structure.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,
3, "Bad perturbation distance")
def test_add_oxidation_states(self):
oxidation_states = {"Si": -4}
self.structure.add_oxidation_state_by_element(oxidation_states)
for site in self.structure:
for k in site.species_and_occu.keys():
self.assertEqual(k.oxi_state, oxidation_states[k.symbol],
"Wrong oxidation state assigned!")
oxidation_states = {"Fe": 2}
self.assertRaises(ValueError,
self.structure.add_oxidation_state_by_element,
oxidation_states)
self.structure.add_oxidation_state_by_site([2, -4])
self.assertEqual(self.structure[0].specie.oxi_state, 2)
self.assertRaises(ValueError,
self.structure.add_oxidation_state_by_site,
[1])
def test_remove_oxidation_states(self):
co_elem = Element("Co")
o_elem = Element("O")
co_specie = Specie("Co", 2)
o_specie = Specie("O", -2)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice.cubic(10)
s_elem = Structure(lattice, [co_elem, o_elem], coords)
s_specie = Structure(lattice, [co_specie, o_specie], coords)
s_specie.remove_oxidation_states()
self.assertEqual(s_elem, s_specie, "Oxidation state remover "
"failed")
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
s = self.structure.copy()
s.apply_operation(op)
self.assertArrayAlmostEqual(
s.lattice.matrix,
[[0.000000, 3.840198, 0.000000],
[-3.325710, 1.920099, 0.000000],
[2.217138, -0.000000, 3.135509]], 5)
op = SymmOp([[1, 1, 0, 0.5], [1, 0, 0, 0.5], [0, 0, 1, 0.5],
[0, 0, 0, 1]])
s = self.structure.copy()
s.apply_operation(op, fractional=True)
self.assertArrayAlmostEqual(
s.lattice.matrix,
[[5.760297, 3.325710, 0.000000],
[3.840198, 0.000000, 0.000000],
[0.000000, -2.217138, 3.135509]], 5)
def test_apply_strain(self):
s = self.structure
initial_coord = s[1].coords
s.apply_strain(0.01)
self.assertAlmostEqual(
s.lattice.abc,
(3.8785999130369997, 3.878600984287687, 3.8785999130549516))
self.assertArrayAlmostEqual(s[1].coords, initial_coord * 1.01)
a1, b1, c1 = s.lattice.abc
s.apply_strain([0.1, 0.2, 0.3])
a2, b2, c2 = s.lattice.abc
self.assertAlmostEqual(a2 / a1, 1.1)
self.assertAlmostEqual(b2 / b1, 1.2)
self.assertAlmostEqual(c2 / c1, 1.3)
def test_scale_lattice(self):
initial_coord = self.structure[1].coords
self.structure.scale_lattice(self.structure.volume * 1.01 ** 3)
self.assertArrayAlmostEqual(
self.structure.lattice.abc,
(3.8785999130369997, 3.878600984287687, 3.8785999130549516))
self.assertArrayAlmostEqual(self.structure[1].coords,
initial_coord * 1.01)
def test_translate_sites(self):
self.structure.translate_sites([0, 1], [0.5, 0.5, 0.5],
frac_coords=True)
self.assertArrayEqual(self.structure.frac_coords[0],
[0.5, 0.5, 0.5])
self.structure.translate_sites([0], [0.5, 0.5, 0.5],
frac_coords=False)
self.assertArrayAlmostEqual(self.structure.cart_coords[0],
[3.38014845, 1.05428585, 2.06775453])
self.structure.translate_sites([0], [0.5, 0.5, 0.5],
frac_coords=True, to_unit_cell=False)
self.assertArrayAlmostEqual(self.structure.frac_coords[0],
[1.00187517, 1.25665291, 1.15946374])
def test_mul(self):
self.structure *= [2, 1, 1]
self.assertEqual(self.structure.formula, "Si4")
s = [2, 1, 1] * self.structure
self.assertEqual(s.formula, "Si8")
self.assertIsInstance(s, Structure)
s = self.structure * [[1, 0, 0], [2, 1, 0], [0, 0, 2]]
self.assertEqual(s.formula, "Si8")
self.assertArrayAlmostEqual(s.lattice.abc,
[7.6803959, 17.5979979, 7.6803959])
def test_make_supercell(self):
self.structure.make_supercell([2, 1, 1])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell([[1, 0, 0], [2, 1, 0], [0, 0, 1]])
self.assertEqual(self.structure.formula, "Si4")
self.structure.make_supercell(2)
self.assertEqual(self.structure.formula, "Si32")
self.assertArrayAlmostEqual(self.structure.lattice.abc,
[15.360792, 35.195996, 7.680396], 5)
def test_disordered_supercell_primitive_cell(self):
l = Lattice.cubic(2)
f = [[0.5, 0.5, 0.5]]
sp = [{'Si': 0.54738}]
s = Structure(l, sp, f)
#this supercell often breaks things
s.make_supercell([[0,-1,1],[-1,1,0],[1,1,1]])
self.assertEqual(len(s.get_primitive_structure()), 1)
def test_another_supercell(self):
#this is included b/c for some reason the old algo was failing on it
s = self.structure.copy()
s.make_supercell([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
self.assertEqual(s.formula, "Si32")
s = self.structure.copy()
s.make_supercell([[0, 2, 0], [1, 0, 0], [0, 0, 1]])
self.assertEqual(s.formula, "Si4")
def test_to_from_dict(self):
d = self.structure.as_dict()
s2 = Structure.from_dict(d)
self.assertEqual(type(s2), Structure)
def test_to_from_abivars(self):
"""Test as_dict, from_dict with fmt == abivars."""
d = self.structure.as_dict(fmt="abivars")
s2 = Structure.from_dict(d, fmt="abivars")
self.assertEqual(s2, self.structure)
self.assertEqual(type(s2), Structure)
def test_to_from_file_string(self):
for fmt in ["cif", "json", "poscar", "cssr", "yaml", "xsf"]:
s = self.structure.to(fmt=fmt)
self.assertIsNotNone(s)
ss = Structure.from_str(s, fmt=fmt)
self.assertArrayAlmostEqual(
ss.lattice.lengths_and_angles,
self.structure.lattice.lengths_and_angles, decimal=5)
self.assertArrayAlmostEqual(ss.frac_coords,
self.structure.frac_coords)
self.assertIsInstance(ss, Structure)
self.structure.to(filename="POSCAR.testing")
self.assertTrue(os.path.exists("POSCAR.testing"))
os.remove("POSCAR.testing")
self.structure.to(filename="structure_testing.json")
self.assertTrue(os.path.exists("structure_testing.json"))
s = Structure.from_file("structure_testing.json")
self.assertEqual(s, self.structure)
os.remove("structure_testing.json")
def test_from_spacegroup(self):
s1 = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1.formula, "Li8 O4")
s2 = Structure.from_spacegroup(225, Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]])
self.assertEqual(s1, s2)
s2 = Structure.from_spacegroup(225, Lattice.cubic(3), ["Li", "O"],
[[0.25, 0.25, 0.25], [0, 0, 0]],
site_properties={"charge": [1, -2]})
self.assertEqual(sum(s2.site_properties["charge"]), 0)
s = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
self.assertEqual(s.formula, "Cs1 Cl1")
self.assertRaises(ValueError, Structure.from_spacegroup,
"Pm-3m", Lattice.tetragonal(1, 3), ["Cs", "Cl"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
self.assertRaises(ValueError, Structure.from_spacegroup,
"Pm-3m", Lattice.cubic(3), ["Cs"],
[[0, 0, 0], [0.5, 0.5, 0.5]])
def test_from_magnetic_spacegroup(self):
# AFM MnF
s1 = Structure.from_magnetic_spacegroup("P4_2'/mnm'", Lattice.tetragonal(4.87, 3.30),
["Mn", "F"],
[[0, 0, 0],
[0.30, 0.30, 0.00]],
{'magmom': [4, 0]})
self.assertEqual(s1.formula, "Mn2 F4")
self.assertEqual(sum(map(float, s1.site_properties['magmom'])), 0)
self.assertEqual(max(map(float, s1.site_properties['magmom'])), 4)
self.assertEqual(min(map(float, s1.site_properties['magmom'])), -4)
# AFM LaMnO3, ordered on (001) planes
s2 = Structure.from_magnetic_spacegroup("Pn'ma'", Lattice.orthorhombic(5.75, 7.66, 5.53),
["La", "Mn", "O", "O"],
[[0.05, 0.25, 0.99],
[0.00, 0.00, 0.50],
[0.48, 0.25, 0.08],
[0.31, 0.04, 0.72]],
{'magmom': [0, Magmom([4, 0, 0]), 0, 0]})
self.assertEqual(s2.formula, "La4 Mn4 O12")
self.assertEqual(sum(map(float, s2.site_properties['magmom'])), 0)
self.assertEqual(max(map(float, s2.site_properties['magmom'])), 4)
self.assertEqual(min(map(float, s2.site_properties['magmom'])), -4)
def test_merge_sites(self):
species = [{'Ag': 0.5}, {'Cl': 0.25}, {'Cl': 0.1},
{'Ag': 0.5}, {'F': 0.15}, {'F': 0.1}]
coords = [[0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5],
[0, 0, 0], [0.5, 0.5, 1.501], [0.5, 0.5, 1.501]]
s = Structure(Lattice.cubic(1), species, coords)
s.merge_sites(mode="s")
self.assertEqual(s[0].specie.symbol, 'Ag')
self.assertEqual(s[1].species_and_occu,
Composition({'Cl': 0.35, 'F': 0.25}))
self.assertArrayAlmostEqual(s[1].frac_coords, [.5, .5, .5005])
# Test for TaS2 with spacegroup 166 in 160 setting.
l = Lattice.from_lengths_and_angles([3.374351, 3.374351, 20.308941],
[90.000000, 90.000000, 120.000000])
species = ["Ta", "S", "S"]
coords = [[0.000000, 0.000000, 0.944333], [0.333333, 0.666667, 0.353424],
[0.666667, 0.333333, 0.535243]]
tas2 = Structure.from_spacegroup(160, l, species, coords)
assert len(tas2) == 13
tas2.merge_sites(mode="d")
assert len(tas2) == 9
l = Lattice.from_lengths_and_angles([3.587776, 3.587776, 19.622793],
[90.000000, 90.000000, 120.000000])
species = ["Na", "V", "S", "S"]
coords = [[0.333333, 0.666667, 0.165000], [0.000000, 0.000000, 0.998333],
[0.333333, 0.666667, 0.399394], [0.666667, 0.333333, 0.597273]]
navs2 = Structure.from_spacegroup(160, l, species, coords)
assert len(navs2) == 18
navs2.merge_sites(mode="d")
assert len(navs2) == 12
def test_properties(self):
self.assertEqual(self.structure.num_sites, len(self.structure))
self.structure.make_supercell(2)
self.structure[1] = "C"
sites = list(self.structure.group_by_types())
self.assertEqual(sites[-1].specie.symbol, "C")
self.structure.add_oxidation_state_by_element({"Si": 4, "C": 2})
self.assertEqual(self.structure.charge, 62)
def test_set_item(self):
s = self.structure.copy()
s[0] = "C"
self.assertEqual(s.formula, "Si1 C1")
s[(0, 1)] = "Ge"
self.assertEqual(s.formula, "Ge2")
s[0:2] = "Sn"
self.assertEqual(s.formula, "Sn2")
s = self.structure.copy()
s["Si"] = "C"
self.assertEqual(s.formula, "C2")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si1 C0.5")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si1.25 C0.125")
def test_init_error(self):
self.assertRaises(StructureError, Structure, Lattice.cubic(3), ["Si"], [[0, 0, 0], [0.5, 0.5, 0.5]])
def test_from_sites(self):
self.structure.add_site_property("hello", [1, 2])
s = Structure.from_sites(self.structure, to_unit_cell=True)
self.assertEqual(s.site_properties["hello"][1], 2)
def test_magic(self):
s = Structure.from_sites(self.structure)
self.assertEqual(s, self.structure)
self.assertNotEqual(s, None)
s.apply_strain(0.5)
self.assertNotEqual(s, self.structure)
self.assertNotEqual(self.structure * 2, self.structure)
class IMoleculeTest(PymatgenTest):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.coords = coords
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_set_item(self):
s = self.mol.copy()
s[0] = "Si"
self.assertEqual(s.formula, "Si1 H4")
s[(0, 1)] = "Ge"
self.assertEqual(s.formula, "Ge2 H3")
s[0:2] = "Sn"
self.assertEqual(s.formula, "Sn2 H3")
s = self.mol.copy()
s["H"] = "F"
self.assertEqual(s.formula, "C1 F4")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si0.5 C0.25 F4")
s["C"] = "C0.25Si0.5"
self.assertEqual(s.formula, "Si0.625 C0.0625 F4")
def test_bad_molecule(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000],
[-0.513360, 0.889165, -0.36301]]
self.assertRaises(StructureError, Molecule,
["C", "H", "H", "H", "H", "H"], coords,
validate_proximity=True)
def test_get_angle_dihedral(self):
self.assertAlmostEqual(self.mol.get_angle(1, 0, 2), 109.47122144618737)
self.assertAlmostEqual(self.mol.get_angle(3, 1, 2), 60.00001388659683)
        self.assertAlmostEqual(self.mol.get_dihedral(0, 1, 2, 3), -35.26438851071765)
coords = list()
coords.append([0, 0, 0])
coords.append([0, 0, 1])
coords.append([0, 1, 1])
coords.append([1, 1, 1])
self.mol2 = Molecule(["C", "O", "N", "S"], coords)
self.assertAlmostEqual(self.mol2.get_dihedral(0, 1, 2, 3), -90)
def test_get_covalent_bonds(self):
self.assertEqual(len(self.mol.get_covalent_bonds()), 4)
def test_properties(self):
self.assertEqual(len(self.mol), 5)
self.assertTrue(self.mol.is_ordered)
self.assertEqual(self.mol.formula, "H4 C1")
def test_repr_str(self):
ans = """Full Formula (H4 C1)
Reduced Formula: H4C
Charge = 0, Spin Mult = 1
Sites (5)
0 C 0.000000 0.000000 0.000000
1 H 0.000000 0.000000 1.089000
2 H 1.026719 0.000000 -0.363000
3 H -0.513360 -0.889165 -0.363000
4 H -0.513360 0.889165 -0.363000"""
self.assertEqual(self.mol.__str__(), ans)
ans = """Molecule Summary
Site: C (0.0000, 0.0000, 0.0000)
Site: H (0.0000, 0.0000, 1.0890)
Site: H (1.0267, 0.0000, -0.3630)
Site: H (-0.5134, -0.8892, -0.3630)
Site: H (-0.5134, 0.8892, -0.3630)"""
self.assertEqual(repr(self.mol), ans)
def test_site_properties(self):
propertied_mol = Molecule(["C", "H", "H", "H", "H"], self.coords,
site_properties={'magmom':
[0.5, -0.5, 1, 2, 3]})
self.assertEqual(propertied_mol[0].magmom, 0.5)
self.assertEqual(propertied_mol[1].magmom, -0.5)
def test_get_boxed_structure(self):
s = self.mol.get_boxed_structure(9, 9, 9)
# C atom should be in center of box.
self.assertArrayAlmostEqual(s[4].frac_coords,
[0.50000001, 0.5, 0.5])
self.assertArrayAlmostEqual(s[1].frac_coords,
[0.6140799, 0.5, 0.45966667])
self.assertRaises(ValueError, self.mol.get_boxed_structure, 1, 1, 1)
s2 = self.mol.get_boxed_structure(5, 5, 5, (2, 3, 4))
self.assertEqual(len(s2), 24 * 5)
self.assertEqual(s2.lattice.abc, (10, 15, 20))
# Test offset option
s3 = self.mol.get_boxed_structure(9, 9, 9, offset=[0.5,0.5,0.5])
self.assertArrayAlmostEqual(s3[4].coords,
[5,5,5])
# Test no_cross option
self.assertRaises(ValueError, self.mol.get_boxed_structure,
5, 5, 5, offset=[10,10,10],no_cross = True)
def test_get_distance(self):
self.assertAlmostEqual(self.mol.get_distance(0, 1), 1.089)
def test_get_neighbors(self):
nn = self.mol.get_neighbors(self.mol[0], 1)
self.assertEqual(len(nn), 0)
nn = self.mol.get_neighbors(self.mol[0], 2)
self.assertEqual(len(nn), 4)
def test_get_neighbors_in_shell(self):
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 0, 1)
self.assertEqual(len(nn), 1)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)
self.assertEqual(len(nn), 4)
nn = self.mol.get_neighbors_in_shell([0, 0, 0], 2, 0.1)
self.assertEqual(len(nn), 0)
def test_get_dist_matrix(self):
ans = [[0.0, 1.089, 1.08899995636, 1.08900040717, 1.08900040717],
[1.089, 0.0, 1.77832952654, 1.7783298026, 1.7783298026],
[1.08899995636, 1.77832952654, 0.0, 1.77833003783,
1.77833003783],
[1.08900040717, 1.7783298026, 1.77833003783, 0.0, 1.77833],
[1.08900040717, 1.7783298026, 1.77833003783, 1.77833, 0.0]]
self.assertArrayAlmostEqual(self.mol.distance_matrix, ans)
def test_break_bond(self):
(mol1, mol2) = self.mol.break_bond(0, 1)
self.assertEqual(mol1.formula, "H3 C1")
self.assertEqual(mol2.formula, "H1")
def test_prop(self):
self.assertEqual(self.mol.charge, 0)
self.assertEqual(self.mol.spin_multiplicity, 1)
self.assertEqual(self.mol.nelectrons, 10)
self.assertArrayAlmostEqual(self.mol.center_of_mass, [0, 0, 0])
self.assertRaises(ValueError, Molecule, ["C", "H", "H", "H", "H"],
self.coords, charge=1, spin_multiplicity=1)
mol = Molecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertEqual(mol.spin_multiplicity, 2)
self.assertEqual(mol.nelectrons, 9)
#Triplet O2
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]],
spin_multiplicity=3)
self.assertEqual(mol.spin_multiplicity, 3)
def test_equal(self):
mol = IMolecule(["C", "H", "H", "H", "H"], self.coords, charge=1)
self.assertNotEqual(mol, self.mol)
def test_get_centered_molecule(self):
mol = IMolecule(["O"] * 2, [[0, 0, 0], [0, 0, 1.2]],
spin_multiplicity=3)
centered = mol.get_centered_molecule()
self.assertArrayAlmostEqual(centered.center_of_mass, [0, 0, 0])
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = IMolecule.from_dict(d)
self.assertEqual(type(mol2), IMolecule)
propertied_mol = Molecule(["C", "H", "H", "H", "H"], self.coords,
charge=1,
site_properties={'magmom':
[0.5, -0.5, 1, 2, 3]})
d = propertied_mol.as_dict()
self.assertEqual(d['sites'][0]['properties']['magmom'], 0.5)
mol = Molecule.from_dict(d)
self.assertEqual(propertied_mol, mol)
self.assertEqual(mol[0].magmom, 0.5)
self.assertEqual(mol.formula, "H4 C1")
self.assertEqual(mol.charge, 1)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03", "yaml"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = IMolecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, IMolecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
self.mol.to(filename="CH4_testing.yaml")
self.assertTrue(os.path.exists("CH4_testing.yaml"))
mol = Molecule.from_file("CH4_testing.yaml")
self.assertEqual(self.mol, mol)
os.remove("CH4_testing.yaml")
class MoleculeTest(PymatgenTest):
def setUp(self):
coords = [[0.000000, 0.000000, 0.000000],
[0.000000, 0.000000, 1.089000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
self.mol = Molecule(["C", "H", "H", "H", "H"], coords)
def test_mutable_sequence_methods(self):
s = self.mol
s[1] = ("F", [0.5, 0.5, 0.5])
self.assertEqual(s.formula, "H3 C1 F1")
self.assertArrayAlmostEqual(s[1].coords, [0.5, 0.5, 0.5])
s.reverse()
self.assertEqual(s[0].specie, Element("H"))
self.assertArrayAlmostEqual(s[0].coords,
[-0.513360, 0.889165, -0.363000])
del s[1]
self.assertEqual(s.formula, "H2 C1 F1")
s[3] = "N", [0,0,0], {"charge": 4}
self.assertEqual(s.formula, "H2 N1 F1")
self.assertEqual(s[3].charge, 4)
def test_insert_remove_append(self):
mol = self.mol
mol.insert(1, "O", [0.5, 0.5, 0.5])
self.assertEqual(mol.formula, "H4 C1 O1")
del mol[2]
self.assertEqual(mol.formula, "H3 C1 O1")
mol.set_charge_and_spin(0)
self.assertEqual(mol.spin_multiplicity, 2)
mol.append("N", [1, 1, 1])
self.assertEqual(mol.formula, "H3 C1 N1 O1")
self.assertRaises(TypeError, dict, [(mol, 1)])
mol.remove_sites([0, 1])
self.assertEqual(mol.formula, "H3 N1")
def test_translate_sites(self):
self.mol.translate_sites([0, 1], [0.5, 0.5, 0.5])
self.assertArrayEqual(self.mol.cart_coords[0],
[0.5, 0.5, 0.5])
def test_rotate_sites(self):
self.mol.rotate_sites(theta=np.radians(30))
self.assertArrayAlmostEqual(self.mol.cart_coords[2],
[ 0.889164737, 0.513359500, -0.363000000])
def test_replace(self):
self.mol[0] = "Ge"
self.assertEqual(self.mol.formula, "Ge1 H4")
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.5 Ge0.5 H4")
#this should change the .5Si .5Ge sites to .75Si .25Ge
self.mol.replace_species({Element("Ge"): {Element("Ge"): 0.5,
Element("Si"): 0.5}})
self.assertEqual(self.mol.formula, "Si0.75 Ge0.25 H4")
d = 0.1
pre_perturbation_sites = self.mol.sites[:]
self.mol.perturb(distance=d)
post_perturbation_sites = self.mol.sites
for i, x in enumerate(pre_perturbation_sites):
self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,
3, "Bad perturbation distance")
def test_add_site_property(self):
self.mol.add_site_property("charge", [4.1, -2, -2, -2, -2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[1].charge, -2)
self.mol.add_site_property("magmom", [3, 2, 2, 2, 2])
self.assertEqual(self.mol[0].charge, 4.1)
self.assertEqual(self.mol[0].magmom, 3)
def test_to_from_dict(self):
d = self.mol.as_dict()
mol2 = Molecule.from_dict(d)
self.assertEqual(type(mol2), Molecule)
def test_apply_operation(self):
op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)
self.mol.apply_operation(op)
self.assertArrayAlmostEqual(self.mol[2].coords,
[0.000000, 1.026719, -0.363000])
def test_substitute(self):
coords = [[0.000000, 0.000000, 1.08],
[0.000000, 0.000000, 0.000000],
[1.026719, 0.000000, -0.363000],
[-0.513360, -0.889165, -0.363000],
[-0.513360, 0.889165, -0.363000]]
sub = Molecule(["X", "C", "H", "H", "H"], coords)
self.mol.substitute(1, sub)
self.assertAlmostEqual(self.mol.get_distance(0, 4), 1.54)
f = Molecule(["X", "F"], [[0, 0, 0], [0, 0, 1.11]])
self.mol.substitute(2, f)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.35)
oh = Molecule(["X", "O", "H"],
[[0, 0.780362, -.456316], [0, 0, .114079],
[0, -.780362, -.456316]])
self.mol.substitute(1, oh)
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.43)
self.mol.substitute(3, "methyl")
self.assertEqual(self.mol.formula, "H7 C3 O1 F1")
coords = [[0.00000, 1.40272, 0.00000],
[0.00000, 2.49029, 0.00000],
[-1.21479, 0.70136, 0.00000],
[-2.15666, 1.24515, 0.00000],
[-1.21479, -0.70136, 0.00000],
[-2.15666, -1.24515, 0.00000],
[0.00000, -1.40272, 0.00000],
[0.00000, -2.49029, 0.00000],
[1.21479, -0.70136, 0.00000],
[2.15666, -1.24515, 0.00000],
[1.21479, 0.70136, 0.00000],
[2.15666, 1.24515, 0.00000]]
benzene = Molecule(["C", "H", "C", "H", "C", "H", "C", "H", "C", "H",
"C", "H"], coords)
benzene.substitute(1, sub)
self.assertEqual(benzene.formula, "H8 C7")
#Carbon attached should be in plane.
self.assertAlmostEqual(benzene[11].coords[2], 0)
def test_to_from_file_string(self):
for fmt in ["xyz", "json", "g03"]:
s = self.mol.to(fmt=fmt)
self.assertIsNotNone(s)
m = Molecule.from_str(s, fmt=fmt)
self.assertEqual(m, self.mol)
self.assertIsInstance(m, Molecule)
self.mol.to(filename="CH4_testing.xyz")
self.assertTrue(os.path.exists("CH4_testing.xyz"))
os.remove("CH4_testing.xyz")
if __name__ == '__main__':
import unittest
unittest.main()
|
tallakahath/pymatgen
|
pymatgen/core/tests/test_structure.py
|
Python
|
mit
| 50,592
|
[
"pymatgen"
] |
1b8ff834565d21c769970c7514e3d7b804b70971545062188d69c6cc83e40eb6
|
from pkg_resources import resource_string
from json import loads
# Error codes are provided as a convenience to Galaxy API clients, but at this
# time they do not represent part of the more stable interface. They can change
# without warning between releases.
UNKNOWN_ERROR_MESSAGE = "Unknown error occurred while processing request."
class ErrorCode( object ):
def __init__( self, code, default_error_message ):
self.code = code
self.default_error_message = default_error_message or UNKNOWN_ERROR_MESSAGE
def __str__( self ):
return str( self.default_error_message )
def __int__( self ):
return int( self.code )
@staticmethod
def from_dict( entry ):
name = entry.get("name")
code = entry.get("code")
message = entry.get("message")
return ( name, ErrorCode( code, message ) )
error_codes_json = resource_string( __name__, 'error_codes.json' )
for entry in loads( error_codes_json ):
name, error_code_obj = ErrorCode.from_dict( entry )
globals()[ name ] = error_code_obj
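# A minimal usage sketch (not part of this module; names are illustrative):
# after import, every entry in error_codes.json is exposed as a module-level
# ErrorCode attribute, so a hypothetical code named UNKNOWN could be used as
#     from galaxy.exceptions import error_codes
#     raise SomeGalaxyException(err_code=error_codes.UNKNOWN)
# int() and str() on an ErrorCode yield the numeric code and default message.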
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/exceptions/error_codes.py
|
Python
|
gpl-3.0
| 1,064
|
[
"Galaxy"
] |
d561678c3543918083c6b9e01545c2c611fc99d78ec14cf95db9eb9a15f4d87a
|
#
def cutout(infile,mag,color='red'):
import os, utilities
ppid = str(os.getppid())
print ppid + 'a'
#pylab.show()
outfile = raw_input('name of output file?')
color = raw_input('color of regions?')
limits = ['lower_mag','upper_mag','lower_diff','upper_diff']
lim_dict = {}
for lim in limits:
print lim + '?'
b = raw_input()
lim_dict[lim] = b
utilities.run('ldacfilter -i ' + infile + ' -t PSSC\
-c "(((SEx_' + mag + '>' + str(lim_dict['lower_mag']) + ') AND (SEx_' + mag + '<' + str(lim_dict['upper_mag']) + ')) AND (magdiff>' + str(lim_dict['lower_diff']) + ')) AND (magdiff<' + str(lim_dict['upper_diff']) + ');"\
-o cutout1.' + ppid,['cutout1.' + ppid])
utilities.run('ldactoasc -b -q -i cutout1.' + ppid + ' -t PSSC\
-k Ra Dec > /tmp/' + outfile,[outfile])
utilities.run('mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour ' + color + ' /tmp/' + outfile)
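# get_median (below) sorts the requested column in place and returns the
# middle element. A minimal equivalent sketch, assuming a plain numpy array:
#     import numpy as np
#     median = np.sort(values)[len(values) // 2]
# (numpy.median would interpolate for even-length input; this picks the
# upper-middle element instead.)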
def get_median(cat,key):
import astropy.io.fits as pyfits, sys, os, re, string, copy
p = pyfits.open(cat)
magdiff = p[1].data.field(key)
magdiff.sort()
return magdiff[int(len(magdiff)/2)]
def coordinate_limits(cat):
import astropy.io.fits as pyfits, sys, os, re, string, copy
p = pyfits.open(cat)
ra = p[2].data.field('ALPHA_J2000')
ra.sort()
dec = p[2].data.field('DELTA_J2000')
dec.sort()
return ra[0],ra[-1],dec[0],dec[-1]
def combine_cats(cats,outfile,search_params):
#cats = [{'im_type': 'DOMEFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.DOMEFLAT.fixwcs.rawconv'}, {'im_type': 'SKYFLAT', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.SKYFLAT.fixwcs.rawconv'}, {'im_type': 'OCIMAGE', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.OCIMAGE.fixwcs.rawconv'}]
#outfile = '' + search_params['TEMPDIR'] + 'stub'
#cats = [{'im_type': 'MAIN', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS..fixwcs.rawconv'}, {'im_type': 'D', 'cat': '' + search_params['TEMPDIR'] + '/SUPA0005188_3OCFS.D.fixwcs.rawconv'}]
import astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
for catalog in cats:
file = catalog['cat']
os.system('mkdir ' + search_params['TEMPDIR'] )
os.system('ldactoasc -i ' + catalog['cat'] + ' -b -s -k MAG_APER MAGERR_APER -t OBJECTS > ' + search_params['TEMPDIR'] + '/APER')
os.system('asctoldac -i ' + search_params['TEMPDIR'] + '/APER -o ' + search_params['TEMPDIR'] + '/cat1 -t OBJECTS -c ./photconf/MAG_APER.conf')
os.system('ldacjoinkey -i ' + catalog['cat'] + ' -p ' + search_params['TEMPDIR'] + '/cat1 -o ' + search_params['TEMPDIR'] + '/all.conv' + catalog['im_type'] + ' -k MAG_APER1 MAG_APER2 MAGERR_APER1 MAGERR_APER2')
tables[catalog['im_type']] = pyfits.open(search_params['TEMPDIR'] + '/all.conv' + catalog['im_type'])
#if filter == filters[0]:
# tables['notag'] = pyfits.open('' + search_params['TEMPDIR'] + 'all.conv' )
for catalog in cats:
for i in range(len(tables[catalog['im_type']][1].columns)):
print catalog['im_type'], catalog['cat']
#raw_input()
if catalog['im_type'] != '':
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name + catalog['im_type']
else:
tables[catalog['im_type']][1].columns[i].name = tables[catalog['im_type']][1].columns[i].name
cols.append(tables[catalog['im_type']][1].columns[i])
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hduIMHEAD = pyfits.BinTableHDU.from_columns(tables[catalog['im_type']][2].columns)
hduOBJECTS = pyfits.BinTableHDU.from_columns(cols)
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header['EXTNAME']='FIELDS'
hdulist[2].header['EXTNAME']='OBJECTS'
print file
os.system('rm ' + outfile)
import re
res = re.split('/',outfile)
os.system('mkdir -p ' + reduce(lambda x,y: x + '/' + y,res[:-1]))
hdulist.writeto(outfile)
print outfile , '#########'
print 'done'
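# paste_cats (below) stacks the OBJECTS tables of several LDAC catalogs
# row-wise: it allocates one BinTableHDU sized to the total row count, then
# copies each input's columns into successive row ranges. The core pattern,
# sketched with astropy.io.fits:
#     hdu = pyfits.BinTableHDU.from_columns(first[2].columns, nrows=ntotal)
#     hdu.data.field(i)[start:end] = part[2].data.field(i)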
def paste_cats(cats,outfile): #cats,outfile,search_params):
#outfile = '/tmp/test.cat'
#cats = ['/tmp/15464/SUPA0028506_1OCFS.newpos', '/tmp/15464/SUPA0028506_9OCFS.newpos']
#print outfile, cats
import astropy.io.fits as pyfits, sys, os, re, string, copy
from config_bonn import cluster, tag, arc, filters
ppid = str(os.getppid())
tables = {}
colset = 0
cols = []
table = pyfits.open(cats[0])
data = []
nrows = 0
for catalog in cats:
cattab = pyfits.open(catalog)
nrows += cattab[2].data.shape[0]
hduOBJECTS = pyfits.BinTableHDU.from_columns(table[2].columns, nrows=nrows)
rowstart = 0
rowend = 0
for catalog in cats:
cattab = pyfits.open(catalog)
rowend += cattab[2].data.shape[0]
for i in range(len(cattab[2].columns)):
hduOBJECTS.data.field(i)[rowstart:rowend]=cattab[2].data.field(i)
rowstart = rowend
# update SeqNr
print rowend,len( hduOBJECTS.data.field('SeqNr')), len(range(1,rowend+1))
hduOBJECTS.data.field('SeqNr')[0:rowend]=range(1,rowend+1)
#hdu[0].header['EXTNAME']='FIELDS'
    hduIMHEAD = pyfits.BinTableHDU.from_columns(table[1].columns)
print cols
print len(cols)
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
hdulist.append(hduIMHEAD)
hdulist.append(hduOBJECTS)
hdulist[1].header['EXTNAME']='FIELDS'
hdulist[2].header['EXTNAME']='OBJECTS'
os.system('rm ' + outfile)
hdulist.writeto(outfile)
print outfile , '#########'
print 'done'
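# imstats (below) shells out to the external 'imstats' binary once per chip
# image, parses the '# imstats:' header line for column names, and averages
# the per-chip median and sigma into the exposure record.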
def imstats(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':search_params['cluster']}
print dict['files']
import commands
tmp_dicts = []
for file in dict['files']:
        op = commands.getoutput('imstats ' + file)
print op
res = re.split('\n',op)
for line in res:
if string.find(line,'filename') != -1:
line = line.replace('# imstats: ','')
res2 = re.split('\t',line)
res3 = re.split('\s+',res[-1])
tmp_dict = {}
for i in range(len(res3)):
tmp_dict[res2[i]] = res3[i]
tmp_dicts.append(tmp_dict)
print tmp_dicts
median_average = 0
sigma_average = 0
for d in tmp_dicts:
print d.keys()
sigma_average += float(d['sigma'])
median_average += float(d['median'])
dict['sigma_average'] = sigma_average / len(tmp_dicts)
dict['median_average'] = median_average / len(tmp_dicts)
print dict['sigma_average'], dict['median_average']
save_exposure(dict,SUPA,FLAT_TYPE)
def save_fit(fits,im_type,type,SUPA,FLAT_TYPE):
import MySQLdb, sys, os, re, time
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for fit in fits:
#which_solution += 1
user_name = os.environ['USER']
time_now = time.asctime()
user = user_name #+ str(time.time())
dict = {}
#copy array but exclude lists
for ele in fit['class'].fitvars.keys():
if ele != 'condition' and ele != 'model_name' and ele != 'fixed_name':
dict[ele + '_' + type + '_' + im_type] = fit['class'].fitvars[ele]
save_exposure(dict,SUPA,FLAT_TYPE)
def select_analyze(cluster):
import MySQLdb, sys, os, re, time
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
command = "DESCRIBE illumination_db"
print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
print keys
command = "SELECT * from illumination_db where zp_err_galaxy_D is null and PPRUN='2002-06-04_W-J-V'" # where cluster='HDFN' and filter='W-J-V' and ROTATION=0"
command = "SELECT * from illumination_db where matched_cat_star is null" # where cluster='HDFN' and filter='W-J-V' and ROTATION=0"
#command = "select * from illumination_db where SUPA='SUPA0028506'"
command = "select * from illumination_db where cluster='MACS0417-11' and OBJECT like '%0417c%' and color1_star_ is null"
print command
c.execute(command)
results = c.fetchall()
print len(results)
dicts = []
for j in range(len(results)):
dict = {}
print j, len(results)
for i in range(len(results[j])):
dict[keys[i]] = results[j][i]
print dict['SUPA'], dict['file'], dict['cluster'], dict['pasted_cat'], dict['matched_cat_star']
#good = raw_input()
d_update = get_files(dict['SUPA'],dict['FLAT_TYPE'])
go = 0
if d_update.has_key('TRIED'):
if d_update['TRIED'] != 'YES':
go = 1
else: go = 1
if 1: #go:
save_exposure({'TRIED':'YES'},dict['SUPA'],dict['FLAT_TYPE'])
analyze(dict['SUPA'],dict['FLAT_TYPE'])
def analyze(SUPA,FLAT_TYPE):
#try:
import sys
import os
#os.system('rm -rf ' + search_params['TEMPDIR'] + '*')
ppid = str(os.getppid())
#try:
if 1:
#imstats(SUPA,FLAT_TYPE)
#find_seeing(SUPA,FLAT_TYPE)
#length(SUPA,FLAT_TYPE)
#sextract(SUPA,FLAT_TYPE)
#match_simple(SUPA,FLAT_TYPE)
phot(SUPA,FLAT_TYPE)
#except KeyboardInterrupt:
# raise
#except:
# ppid_loc = str(os.getppid())
# print sys.exc_info()
# print 'something else failed',ppid, ppid_loc
#
# if ppid_loc != ppid: sys.exit(0)
# os.system('rm -rf /tmp/' + ppid)
#
# os.system('rm -rf /tmp/' + ppid)
#
def get_files(SUPA,FLAT_TYPE):
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
command = "DESCRIBE illumination_db"
print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
command = "SELECT * from illumination_db where SUPA='" + SUPA + "' AND FLAT_TYPE='" + FLAT_TYPE + "'"
print command
c.execute(command)
results = c.fetchall()
dict = {}
for i in range(len(results[0])):
dict[keys[i]] = results[0][i]
print dict
file_pat = dict['file']
print file_pat
import re, glob
res = re.split('_\d+O',file_pat)
pattern = res[0] + '_*O' + res[1]
print pattern
files = glob.glob(pattern)
dict['files'] = files
print files
return dict
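# save_exposure (below) implements a crude upsert with a dynamic schema:
# every dict key becomes a column (ALTER TABLE ... ADD, failures ignored when
# the column already exists), a row keyed on (SUPA, FLAT_TYPE) is INSERTed if
# missing, and all values are written in a single UPDATE. Values containing
# any letter except uppercase 'E' (kept so exponent notation still parses as
# float), underscore, or hyphen are stored as strings; the rest as floats.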
def save_exposure(dict,SUPA=None,FLAT_TYPE=None):
if SUPA != None and FLAT_TYPE != None:
dict['SUPA'] = SUPA
dict['FLAT_TYPE'] = FLAT_TYPE
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
command = "CREATE TABLE IF NOT EXISTS illumination_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id))"
print command
#c.execute("DROP TABLE IF EXISTS illumination_db")
#c.execute(command)
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
from copy import copy
floatvars = {}
stringvars = {}
#copy array but exclude lists
import string
letters = string.ascii_lowercase + string.ascii_uppercase.replace('E','') + '_' + '-'
for ele in dict.keys():
type = 'float'
for l in letters:
if string.find(str(dict[ele]),l) != -1:
type = 'string'
if type == 'float':
floatvars[ele] = str(float(dict[ele]))
elif type == 'string':
stringvars[ele] = dict[ele]
# make database if it doesn't exist
print 'floatvars', floatvars
print 'stringvars', stringvars
    for column in stringvars:
        try:
            command = 'ALTER TABLE illumination_db ADD ' + column + ' varchar(240)'
            c.execute(command)
        except:
            pass  # column already exists
    for column in floatvars:
        try:
            command = 'ALTER TABLE illumination_db ADD ' + column + ' float(30)'
            c.execute(command)
        except:
            pass  # column already exists
# insert new observation
SUPA = dict['SUPA']
flat = dict['FLAT_TYPE']
c.execute("SELECT SUPA from illumination_db where SUPA = '" + SUPA + "' and flat_type = '" + flat + "'")
results = c.fetchall()
print results
if len(results) > 0:
print 'already added'
else:
command = "INSERT INTO illumination_db (SUPA,FLAT_TYPE) VALUES ('" + dict['SUPA'] + "','" + dict['FLAT_TYPE'] + "')"
print command
c.execute(command)
import commands
vals = ''
for key in stringvars.keys():
print key, stringvars[key]
vals += ' ' + key + "='" + str(stringvars[key]) + "',"
for key in floatvars.keys():
print key, floatvars[key]
vals += ' ' + key + "='" + floatvars[key] + "',"
vals = vals[:-1]
command = "UPDATE illumination_db set " + vals + " WHERE SUPA='" + dict['SUPA'] + "' AND FLAT_TYPE='" + dict['FLAT_TYPE'] + "'"
print command
c.execute(command)
print vals
#names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
#values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
#names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
#values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
#command = "INSERT INTO illumination_db (" + names + ") VALUES (" + values + ")"
#print command
#os.system(command)
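# initialize (below) loads progs.ini into os.environ and sets up the
# per-process scratch directory /tmp/<ppid>/ that the rest of the pipeline
# writes into.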
def initialize(filter,cluster):
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
dict = bashreader.parseFile('progs.ini')
for key in dict.keys():
os.environ[key] = str(dict[key])
import os
ppid = str(os.getppid())
PHOTCONF = './photconf/'
TEMPDIR = '/tmp/' + ppid + '/'
os.system('mkdir ' + TEMPDIR)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':cluster}
search_params = {'path':path, 'cluster':cluster, 'filter':filter, 'PHOTCONF':PHOTCONF, 'DATACONF':os.environ['DATACONF'], 'TEMPDIR':TEMPDIR}
return search_params
def update_dict(SUPA,FLAT_TYPE):
import utilities
dict = get_files(SUPA,FLAT_TYPE)
print dict['file']
kws = utilities.get_header_kw(dict['file'],['ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD']) # return KEY/NA if not SUBARU
save_exposure(kws,SUPA,FLAT_TYPE)
def gather_exposures(cluster,filters=None):
if not filters:
filters = ['B','W-J-B','W-J-V','W-C-RC','W-C-IC','I','W-S-Z+']
for filter in filters:
search_params = initialize(filter,cluster)
import os, re, bashreader, sys, string, utilities
from glob import glob
from copy import copy
searchstr = "/%(path)s/%(filter)s/SCIENCE/*fits" % search_params
print searchstr
files = glob(searchstr)
files.sort()
#print files
exposures = {}
# first 30 files
#print files[0:30]
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
for file in files:
if string.find(file,'wcs') == -1 and string.find(file,'.sub.fits') == -1:
res = re.split('_',re.split('/',file)[-1])
exp_name = res[0]
if not exposures.has_key(exp_name): exposures[exp_name] = {'images':[],'keywords':{}}
exposures[exp_name]['images'].append(file) # exp_name is the root of the image name
if len(exposures[exp_name]['keywords'].keys()) == 0: #not exposures[exp_name]['keywords'].has_key('ROTATION'): #if exposure does not have keywords yet, then get them
exposures[exp_name]['keywords']['filter'] = filter
exposures[exp_name]['keywords']['file'] = file
res2 = re.split('/',file)
for r in res2:
if string.find(r,filter) != -1:
print r
exposures[exp_name]['keywords']['date'] = r.replace(filter + '_','')
exposures[exp_name]['keywords']['fil_directory'] = r
search_params['fil_directory'] = r
kws = utilities.get_header_kw(file,['CRVAL1','CRVAL2','ROTATION','OBJECT','GABODSID','CONFIG','EXPTIME','AIRMASS','INSTRUM','PPRUN','BADCCD']) # return KEY/NA if not SUBARU
''' figure out a way to break into SKYFLAT, DOMEFLAT '''
ppid = str(os.getppid())
command = 'dfits ' + file + ' > ' + search_params['TEMPDIR'] + '/header'
utilities.run(command)
                # read the header back; use a new name so the loop variable 'file' is not shadowed
                header_text = open(search_params['TEMPDIR'] + 'header','r').read()
                if string.find(header_text,'SKYFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'SKYFLAT'
                elif string.find(header_text,'DOMEFLAT') != -1: exposures[exp_name]['keywords']['FLAT_TYPE'] = 'DOMEFLAT'
                #print header_text, exposures[exp_name]['keywords']['FLAT_TYPE']
                header_lines = open(search_params['TEMPDIR'] + 'header','r').readlines()
                for line in header_lines:
print line
if string.find(line,'Flat frame:') != -1 and string.find(line,'illum') != -1:
import re
res = re.split('SET',line)
if len(res) > 1:
res = re.split('_',res[1])
set = res[0]
exposures[exp_name]['keywords']['FLAT_SET'] = set
res = re.split('illum',line)
res = re.split('\.',res[1])
smooth = res[0]
exposures[exp_name]['keywords']['SMOOTH'] = smooth
break
for kw in kws.keys():
exposures[exp_name]['keywords'][kw] = kws[kw]
exposures[exp_name]['keywords']['SUPA'] = exp_name
exposures[exp_name]['keywords']['cluster'] = cluster
print exposures[exp_name]['keywords']
save_exposure(exposures[exp_name]['keywords'])
return exposures
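# find_seeing and sextract (below) fan work out with os.fork(): the parent
# collects child PIDs and waits on each, while every child processes one chip
# image and must exit rather than fall through. A minimal sketch of the
# pattern (process() is a hypothetical worker):
#     children = []
#     for item in work:
#         pid = os.fork()
#         if pid:
#             children.append(pid)
#         else:
#             process(item)
#             sys.exit(0)
#     for pid in children:
#         os.waitpid(pid, 0)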
def find_seeing(SUPA,FLAT_TYPE):
import os, re, utilities, sys
from copy import copy
dict = get_files(SUPA,FLAT_TYPE)
print dict['file']
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
print dict['files']
#params PIXSCALE GAIN
''' quick run through for seeing '''
children = []
for image in search_params['files']:
child = os.fork()
if child:
children.append(child)
else:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print ROOT
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
params['finalflagim'] = weightim
#os.system('rm ' + finalflagim)
#command = "ic -p 16 '1 %2 %1 0 == ?' " + weightim + " " + flagim + " > " + finalflagim
#utilities.run(command)
command = "nice sex %(file)s -c %(PHOTCONF)s/singleastrom.conf.sex \
-FLAG_IMAGE ''\
-FLAG_TYPE MAX\
-CATALOG_NAME %(TEMPDIR)s/seeing_%(ROOT)s.cat \
-FILTER_NAME %(PHOTCONF)s/default.conv\
-CATALOG_TYPE 'ASCII' \
-DETECT_MINAREA 8 -DETECT_THRESH 8.\
-ANALYSIS_THRESH 8 \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT\
-PARAMETERS_NAME %(PHOTCONF)s/singleastrom.ascii.flag.sex" % params
print command
os.system(command)
sys.exit(0)
for child in children:
os.waitpid(child,0)
command = 'cat ' + search_params['TEMPDIR'] + 'seeing_' + SUPA + '*cat > ' + search_params['TEMPDIR'] + 'paste_seeing_' + SUPA + '.cat'
utilities.run(command)
file_seeing = search_params['TEMPDIR'] + '/paste_seeing_' + SUPA + '.cat'
PIXSCALE = float(search_params['PIXSCALE'])
reload(utilities)
fwhm = utilities.calc_seeing(file_seeing,10,PIXSCALE)
save_exposure({'fwhm':fwhm},SUPA,FLAT_TYPE)
print file_seeing, SUPA, PIXSCALE
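# length (below) derives the mosaic footprint: it scans CRPIX1/CRPIX2 across
# all chips, keeps the extreme values, and adds NAXIS1/NAXIS2 so that
# LENGTH1/LENGTH2 span the full focal plane in pixels.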
def length(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':search_params['cluster']}
''' get the CRPIX values '''
start = 1
    # CRPIXZERO corresponds to the bottom-left chip and therefore has the largest CRPIX value
for image in search_params['files']:
print image
res = re.split('\_\d+',re.split('\/',image)[-1])
#print res
imroot = "/%(path)s/%(fil_directory)s/SCIENCE/" % search_params
im = imroot + res[0] + '_1' + res[1]
#print im
crpix = utilities.get_header_kw(image,['CRPIX1','CRPIX2','NAXIS1','NAXIS2'])
if start == 1:
crpixzero = copy(crpix)
crpixhigh = copy(crpix)
start = 0
from copy import copy
print float(crpix['CRPIX1']) < float(crpixzero['CRPIX1']), float(crpix['CRPIX2']) < float(crpixzero['CRPIX2'])
if float(crpix['CRPIX1']) + 50 >= float(crpixzero['CRPIX1']) and float(crpix['CRPIX2']) +50 >= float(crpixzero['CRPIX2']):
crpixzero = copy(crpix)
if float(crpix['CRPIX1']) - 50 <= float(crpixhigh['CRPIX1']) and float(crpix['CRPIX2']) - 50 <= float(crpixhigh['CRPIX2']):
crpixhigh = copy(crpix)
print crpix['CRPIX1'], crpix['CRPIX2'], crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
LENGTH1 = abs(float(crpixhigh['CRPIX1']) - float(crpixzero['CRPIX1'])) + float(crpix['NAXIS1'])
LENGTH2 = abs(float(crpixhigh['CRPIX2']) - float(crpixzero['CRPIX2'])) + float(crpix['NAXIS2'])
print LENGTH1, LENGTH2, crpixzero['CRPIX1'], crpixzero['CRPIX2'], crpixhigh['CRPIX1'], crpixhigh['CRPIX2']#, crpixhigh
save_exposure({'crfixed':'third','LENGTH1':LENGTH1,'LENGTH2':LENGTH2,'CRPIX1ZERO':crpixzero['CRPIX1'],'CRPIX2ZERO':crpixzero['CRPIX2']},SUPA,FLAT_TYPE)
def sextract(SUPA,FLAT_TYPE):
import os, re, utilities, bashreader, sys, string
from copy import copy
from glob import glob
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':search_params['cluster']}
subpath='/nfs/slac/g/ki/ki05/anja/SUBARU/'
children = []
print search_params
kws = utilities.get_header_kw(search_params['files'][0],['PPRUN'])
print kws['PPRUN']
pprun = kws['PPRUN']
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
search_params['files'].sort()
if 1:
print search_params['files']
for image in search_params['files']:
print image
child = os.fork()
if child:
children.append(child)
else:
try:
params = copy(search_params)
ROOT = re.split('\.',re.split('\/',image)[-1])[0]
params['ROOT'] = ROOT
BASE = re.split('O',ROOT)[0]
params['BASE'] = BASE
NUM = re.split('O',re.split('\_',ROOT)[1])[0]
params['NUM'] = NUM
print NUM, BASE, ROOT
                    params['GAIN'] = 2.50  ## WARNING: hard-coded gain
print ROOT
finalflagim = "%(TEMPDIR)sflag_%(ROOT)s.fits" % params
weightim = "/%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits" % params
#flagim = "/%(path)s/%(fil_directory)s/WEIGHTS/globalflag_%(NUM)s.fits" % params
#finalflagim = TEMPDIR + "flag_%(ROOT)s.fits" % params
params['finalflagim'] = weightim
im = "/%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits" % params
crpix = utilities.get_header_kw(im,['CRPIX1','CRPIX2'])
SDSS1 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)s.head" % params
SDSS2 = "/%(path)s/%(fil_directory)s/SCIENCE/headers_scamp_SDSS-R6/%(BASE)sO*.head" % params
from glob import glob
print glob(SDSS1), glob(SDSS2)
head = None
if len(glob(SDSS1)) > 0:
head = glob(SDSS1)[0]
elif len(glob(SDSS2)) > 0:
head = glob(SDSS2)[0]
if head is None:
command = "sex /%(path)s/%(fil_directory)s/SCIENCE/%(ROOT)s.fits -c %(PHOTCONF)s/phot.conf.sex \
-PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
-CATALOG_NAME %(TEMPDIR)s/%(ROOT)s.cat \
-FILTER_NAME %(DATACONF)s/default.conv\
-FILTER Y \
-FLAG_TYPE MAX\
-FLAG_IMAGE ''\
-SEEING_FWHM %(fwhm).3f \
-DETECT_MINAREA 3 -DETECT_THRESH 3 -ANALYSIS_THRESH 3 \
-MAG_ZEROPOINT 27.0 \
-GAIN %(GAIN).3f \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT" % params
#-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
#-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
filtcatname = "%(TEMPDIR)s/%(ROOT)s.filt.cat" % params
print command
utilities.run(command,[catname])
utilities.run('ldacfilter -i ' + catname + ' -o ' + filtcatname + ' -t LDAC_OBJECTS\
-c "(CLASS_STAR > 0.0);"',[filtcatname])
if len(glob(filtcatname)) > 0:
import commands
lines = commands.getoutput('ldactoasc -s -b -i ' + filtcatname + ' -t LDAC_OBJECTS | wc -l')
import re
res = re.split('\n',lines)
print lines
if int(res[-1]) == 0: sys.exit(0)
command = 'scamp ' + filtcatname + " -SOLVE_PHOTOM N -ASTREF_CATALOG SDSS-R6 -CHECKPLOT_TYPE NONE -WRITE_XML N "
print command
utilities.run(command)
head = "%(TEMPDIR)s/%(ROOT)s.filt.head" % params
#headfile = "%(TEMPDIR)s/%(ROOT)s.head" % params
print head
if head is not None:
hf = open(head,'r').readlines()
hdict = {}
for line in hf:
import re
if string.find(line,'=') != -1:
res = re.split('=',line)
name = res[0].replace(' ','')
res = re.split('/',res[1])
value = res[0].replace(' ','')
print name, value
hdict[name] = value
imfix = "%(TEMPDIR)s/%(ROOT)s.fixwcs.fits" % params
print imfix
os.system('mkdir ' + search_params['TEMPDIR'])
command = "cp " + im + " " + imfix
print command
utilities.run(command)
import commands
out = commands.getoutput('gethead ' + imfix + ' CRPIX1 CRPIX2')
import re
res = re.split('\s+',out)
os.system('sethead ' + imfix + ' CRPIX1OLD=' + res[0])
os.system('sethead ' + imfix + ' CRPIX2OLD=' + res[1])
for name in ['CRVAL1','CRVAL2','CD1_1','CD1_2','CD2_1','CD2_2','CRPIX1','CRPIX2']:
command = 'sethead ' + imfix + ' ' + name + '=' + hdict[name]
print command
os.system(command)
main_file = '%(TEMPDIR)s/%(ROOT)s.fixwcs.fits' % params
doubles_raw = [{'file_pattern':main_file,'im_type':''},
{'file_pattern':subpath+pprun+'/SCIENCE_DOMEFLAT*/'+BASE+'OC*.fits','im_type':'D'},
{'file_pattern':subpath+pprun+'/SCIENCE_SKYFLAT*/'+BASE+'OC*.fits','im_type':'S'}]
#{'file_pattern':subpath+pprun+'/SCIENCE/OC_IMAGES/'+BASE+'OC*.fits','im_type':'OC'}
# ]
print doubles_raw
doubles_output = []
print doubles_raw
for double in doubles_raw:
file = glob(double['file_pattern'])
if len(file) > 0:
params.update(double)
params['double_cat'] = '%(TEMPDIR)s/%(ROOT)s.%(im_type)s.fixwcs.cat' % params
params['file_double'] = file[0]
command = "nice sex %(TEMPDIR)s%(ROOT)s.fixwcs.fits,%(file_double)s -c %(PHOTCONF)s/phot.conf.sex \
-PARAMETERS_NAME %(PHOTCONF)s/phot.param.sex \
-CATALOG_NAME %(double_cat)s \
-FILTER_NAME %(DATACONF)s/default.conv\
-FILTER Y \
-FLAG_TYPE MAX\
-FLAG_IMAGE ''\
-SEEING_FWHM %(fwhm).3f \
-DETECT_MINAREA 3 -DETECT_THRESH 3 -ANALYSIS_THRESH 3 \
-MAG_ZEROPOINT 27.0 \
-GAIN %(GAIN).3f \
-WEIGHT_IMAGE /%(path)s/%(fil_directory)s/WEIGHTS/%(ROOT)s.weight.fits\
-WEIGHT_TYPE MAP_WEIGHT" % params
#-CHECKIMAGE_TYPE BACKGROUND,APERTURES,SEGMENTATION\
#-CHECKIMAGE_NAME /%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.background.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.apertures.fits,/%(path)s/%(fil_directory)s/PHOTOMETRY/coadd.segmentation.fits\
catname = "%(TEMPDIR)s/%(ROOT)s.cat" % params
print command
utilities.run(command,[catname])
command = 'ldacconv -b 1 -c R -i ' + params['double_cat'] + ' -o ' + params['double_cat'].replace('cat','rawconv')
print command
utilities.run(command)
#command = 'ldactoasc -b -q -i ' + params['double_cat'].replace('cat','rawconv') + ' -t OBJECTS\
# -k ALPHA_J2000 DELTA_J2000 > ' + params['double_cat'].replace('cat','pos')
#print command
#utilities.run(command)
#print 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + params['double_cat'].replace('cat','pos')
#utilities.run(command)
#print params['double_cat'].replace('cat','pos')
# Xpos_ABS is difference of CRPIX and zero CRPIX
doubles_output.append({'cat':params['double_cat'].replace('cat','rawconv'),'im_type':double['im_type']})
print doubles_output
print '***********************************'
outfile = params['TEMPDIR'] + params['ROOT'] + '.conv'
combine_cats(doubles_output,outfile,search_params)
#outfile_field = params['TEMPDIR'] + params['ROOT'] + '.field'
#command = 'ldacdeltab -i ' + outfile + ' -t FIELDS -o ' + outfile_field
#utilities.run(command)
command = 'ldactoasc -b -q -i ' + outfile + ' -t OBJECTS\
-k ALPHA_J2000 DELTA_J2000 > ' + outfile.replace('conv','pos')
print command
utilities.run(command)
command = 'mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour green ' + outfile.replace('conv','pos')
print command
utilities.run(command)
print outfile
command = 'ldaccalc -i ' + outfile + ' -o ' + params['TEMPDIR'] + params['ROOT'] + '.newpos -t OBJECTS -c "(Xpos + ' + str(float(search_params['CRPIX1ZERO']) - float(crpix['CRPIX1'])) + ');" -k FLOAT -n Xpos_ABS "" -c "(Ypos + ' + str(float(search_params['CRPIX2ZERO']) - float(crpix['CRPIX2'])) + ');" -k FLOAT -n Ypos_ABS "" -c "(Ypos*0 + ' + str(params['NUM']) + ');" -k FLOAT -n CHIP "" '
print command
utilities.run(command)
except:
print sys.exc_info()
print 'finishing'
sys.exit(0)
sys.exit(0)
print children
for child in children:
print 'waiting for', child
os.waitpid(child,0)
print 'finished waiting'
pasted_cat = path + 'PHOTOMETRY/ILLUMINATION/' + 'pasted_' + SUPA + '_' + search_params['filter'] + '_' + str(search_params['ROTATION']) + '.cat'
from glob import glob
outcat = search_params['TEMPDIR'] + 'tmppaste_' + SUPA + '.cat'
newposlist = glob(search_params['TEMPDIR'] + SUPA + '*newpos')
print search_params['TEMPDIR'] + SUPA + '*newpos'
if len(newposlist) > 1:
#command = 'ldacpaste -i ' + search_params['TEMPDIR'] + SUPA + '*newpos -o ' + pasted_cat
#print command
files = glob(search_params['TEMPDIR'] + SUPA + '*newpos')
print files
paste_cats(files,pasted_cat)
else:
command = 'cp ' + newposlist[0] + ' ' + pasted_cat
utilities.run(command)
save_exposure({'pasted_cat':pasted_cat},SUPA,FLAT_TYPE)
#fs = glob.glob(subpath+pprun+'/SCIENCE_DOMEFLAT*.tarz'.replace('.tarz',''))
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz'.replace('.tarz',''))
#fs = glob.glob(subpath+pprun+'/SCIENCE_SKYFLAT*.tarz')
#if len(fs) > 0:
# os.system('tar xzvf ' + fs[0])
#return exposures, LENGTH1, LENGTH2
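# match_simple (below) positionally matches the pasted exposure catalog
# against SDSS star/galaxy reference catalogs (fetching them via
# retrieve_test when missing), writes one matched_*.cat per object type, and
# symlinks the result into a per-filter/rotation/object directory tree.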
def match_simple(SUPA,FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
ROTATION = str(search_params['ROTATION']) #exposures[exposure]['keywords']['ROTATION']
import os
starcat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/PHOTOMETRY/sdssstar%(ROTATION)s.cat' % {'ROTATION':ROTATION,'cluster':search_params['cluster']}
galaxycat ='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/PHOTOMETRY/sdssgalaxy%(ROTATION)s.cat' % {'ROTATION':ROTATION,'cluster':search_params['cluster']}
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':search_params['cluster']}
    illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/'
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/STAR/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/GALAXY/')
from glob import glob
print starcat
for type,cat in [['star',starcat],['galaxy',galaxycat]]:
catalog = search_params['pasted_cat'] #exposures[exposure]['pasted_cat']
ramin,ramax, decmin, decmax = coordinate_limits(catalog)
limits = {'ramin':ramin-0.2,'ramax':ramax+0.2,'decmin':decmin-0.2,'decmax':decmax+0.2}
print ramin,ramax, decmin, decmax
if len(glob(cat)) == 0:
#os.system('rm ' + cat)
image = search_params['files'][0]
print image
import retrieve_test
retrieve_test.run(image,cat,type,limits)
filter = search_params['filter'] #exposures[exposure]['keywords']['filter']
#GABODSID = exposures[exposure]['keywords']['GABODSID']
OBJECT = search_params['OBJECT'] #exposures[exposure]['keywords']['OBJECT']
print catalog
outcat = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + 'matched_' + SUPA + '_' + filter + '_' + ROTATION + '_' + type + '.cat'
outcat_dir = path + 'PHOTOMETRY/ILLUMINATION/' + type + '/' + ROTATION + '/' + OBJECT + '/'
os.system('mkdir -p ' + outcat_dir)
file = 'matched_' + SUPA + '.cat'
linkdir = illum_path + '/' + filter + '/' + ROTATION + '/' + OBJECT + '/'
#outcatlink = linkdir + 'matched_' + exposure + '_' + cluster + '_' + GABODSID + '.cat'
outcatlink = linkdir + 'matched_' + SUPA + '_' + search_params['cluster'] + '_' + type + '.cat'
os.system('mkdir -p ' + linkdir)
os.system('rm ' + outcat)
command = 'match_simple.sh ' + catalog + ' ' + cat + ' ' + outcat
print command
os.system(command)
os.system('rm ' + outcatlink)
command = 'ln -s ' + outcat + ' ' + outcatlink
print command
os.system(command)
save_exposure({'matched_cat_' + type:outcat},SUPA,FLAT_TYPE)
print type, 'TYPE!'
print outcat, type
#exposures[exposure]['matched_cat_' + type] = outcat
#return exposures
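# phot (below) compares SExtractor magnitudes with SDSS reference magnitudes
# for matched stars. The per-filter table maps each SUBARU band to an SDSS
# band, two colors, and an extinction coefficient; the zeropoint model fitted
# by photo_abs_new is assumed (not verified here) to be roughly
#     sdss_mag - instr_mag = ZP + EXTCOEFF * airmass + COLCOEFF * color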
def phot(SUPA,FLAT_TYPE):
dict = get_files(SUPA,FLAT_TYPE)
print dict.keys()
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
filter = dict['filter']
import utilities
info = {'B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-B':{'filter':'g','color1':'gmr','color2':'umg','EXTCOEFF':-0.2104,'COLCOEFF':0.0},\
'W-J-V':{'filter':'g','color1':'gmr','color2':'rmi','EXTCOEFF':-0.1202,'COLCOEFF':0.0},\
'W-C-RC':{'filter':'r','color1':'rmi','color2':'gmr','EXTCOEFF':-0.0925,'COLCOEFF':0.0},\
'W-C-IC':{'filter':'i','color1':'imz','color2':'rmi','EXTCOEFF':-0.02728,'COLCOEFF':0.0},\
'W-S-Z+':{'filter':'z','color1':'imz','color2':'rmi','EXTCOEFF':0.0,'COLCOEFF':0.0}}
import mk_saturation_plot,os,re
os.environ['BONN_TARGET'] = search_params['cluster']
os.environ['INSTRUMENT'] = 'SUBARU'
stars_0 = []
stars_90 = []
ROTATION = dict['ROTATION']
print ROTATION
import os
ppid = str(os.getppid())
from glob import glob
for im_type in ['']: #,'D','S']:
for type in ['star']: #,'galaxy']:
file = dict['matched_cat_' + type]
print file
print file
if type == 'galaxy':
mag='MAG_AUTO' + im_type
magerr='MAGERR_AUTO' + im_type
class_star = "<0.9"
if type == 'star':
mag='MAG_APER2' + im_type
magerr='MAGERR_APER2' + im_type
class_star = ">0.9"
print 'filter', filter
os.environ['BONN_FILTER'] = filter
filt = re.split('_',filter)[0]
d = info[filt]
print file
utilities.run('ldacfilter -i ' + file + ' -o ' + search_params['TEMPDIR'] + 'good.stars' + ' -t PSSC\
-c "(Flag!=-99);"',['' + search_params['TEMPDIR'] + 'good.stars'])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'good.stars -o ' + search_params['TEMPDIR'] + 'good.colors -t PSSC\
-c "((((SEx_' + mag + '!=0 AND ' + d['color1'] + '<900) AND ' + d['color1'] + '!=0) AND ' + d['color1'] + '>-900) AND ' + d['color1'] + '!=0);"',['' + search_params['TEMPDIR'] + 'good.colors'])
print '' + search_params['TEMPDIR'] + 'good.colors'
utilities.run('ldaccalc -i ' + search_params['TEMPDIR'] + 'good.colors -t PSSC -c "(' + d['filter'] + 'mag - SEx_' + mag + ');" -k FLOAT -n magdiff "" -o ' + search_params['TEMPDIR'] + 'all.diffA.cat' ,[search_params['TEMPDIR'] + 'all.diffA.cat'] )
median = get_median('' + search_params['TEMPDIR'] + 'all.diffA.cat','magdiff')
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diffA.cat -o ' + search_params['TEMPDIR'] + 'all.diffB.cat -t PSSC\
-c "((magdiff > ' + str(median -1.25) + ') AND (magdiff < ' + str(median + 1.25) + '));"',['' + search_params['TEMPDIR'] + 'good.colors'])
utilities.run('ldaccalc -i ' + search_params['TEMPDIR'] + 'all.diffB.cat -t PSSC -c "(SEx_MaxVal + SEx_BackGr);" -k FLOAT -n MaxVal "" -o ' + search_params['TEMPDIR'] + 'all.diff.cat' ,['' + search_params['TEMPDIR'] + 'all.diff.cat'] )
            command = 'ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'all.diff.cat -t PSSC -k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS' + im_type + ' SEx_CLASS_STAR' + im_type + ' ' + d['filter'] + 'err ' + d['color1'] + ' MaxVal > ' + search_params['TEMPDIR'] + 'mk_sat_all'
#print command
#raw_input()
utilities.run(command,['' + search_params['TEMPDIR'] + 'mk_sat_all'] )
import commands
length = commands.getoutput('wc -l ' + search_params['TEMPDIR'] + 'mk_sat_all')
print 'TOTAL # of STARS:', length
cuts_to_make = ['MaxVal>27500.0','Clean!=1','SEx_IMAFLAGS_ISO'+im_type + '!=0','SEx_CLASS_STAR'+im_type+ class_star,'SEx_Flag'+im_type+'!=0',]
files = ['' + search_params['TEMPDIR'] + 'mk_sat_all']
titles = ['raw']
for cut in cuts_to_make:
#print 'making cut:', cut
cut_name = cut.replace('>','').replace('<','')
os.system('rm ' + cut_name)
command = 'ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diff.cat -o ' + search_params['TEMPDIR'] + '' + cut_name + ' -t PSSC\
-c "(' + cut + ');"'
utilities.run(command,['' + search_params['TEMPDIR'] + '' + cut_name])
import glob
#print len(glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)), glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)
if len(glob.glob('' + search_params['TEMPDIR'] + '' + cut_name)) > 0:
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + '' + cut_name + ' -t PSSC\
-k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS SEx_CLASS_STAR ' + d['filter'] + 'err ' + d['color1'] + ' > ' + search_params['TEMPDIR'] + '' + cut_name + '.cat',['' + search_params['TEMPDIR'] + '' + cut_name + '.cat'])
length = commands.getoutput('wc -l ' + search_params['TEMPDIR'] + '' + cut_name + '.cat')
print 'TOTAL # of STARS CUT:', length
titles.append(cut_name)
files.append('' + search_params['TEMPDIR'] + '' + cut_name + '.cat')
#run('ldactoasc -b -q -i cutout1.' + ppid + ' -t PSSC\
# -k Ra Dec > ' + search_params['TEMPDIR'] + '' + outfile,['' + search_params['TEMPDIR'] + '' + outfile])
#run('mkreg.pl -c -rad 8 -xcol 0 -ycol 1 -wcs -colour ' + color + ' ' + search_params['TEMPDIR'] + '' + outfile)
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'all.diff.cat -o ' + search_params['TEMPDIR'] + 'good.stars -t PSSC\
-c "(MaxVal<27500 AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars'])
#-c "((MaxVal<27500 AND SEx_CLASS_STAR'+im_type+class_star + ') AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars'])
#-c "(MaxVal<27500 AND SEx_IMAFLAGS_ISO'+im_type+'=0);"',['' + search_params['TEMPDIR'] + 'good.stars' + ppid])
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'good.stars -t PSSC\
-k SEx_' + mag + ' ' + d['filter'] + 'mag SEx_FLUX_RADIUS' + im_type + ' SEx_CLASS_STAR'+im_type+' ' + d['filter'] + 'err ' + d['color1'] + ' > ' + search_params['TEMPDIR'] + 'mk_sat',['' + search_params['TEMPDIR'] + 'mk_sat'])
if len(glob.glob('' + search_params['TEMPDIR'] + 'mk_sat')) > 0:
files.append('' + search_params['TEMPDIR'] + 'mk_sat')
titles.append('filtered')
print files, titles
mk_saturation_plot.mk_saturation_all(files,titles,filter)
raw_input()
#cutout('' + search_params['TEMPDIR'] + 'good.stars' + ppid,mag)
print mag
val = raw_input("Look at the saturation plot?")
if len(val)>0:
if val[0] == 'y' or val[0] == 'Y':
mk_saturation_plot.mk_saturation(search_params['TEMPDIR'] + '/mk_sat',filter)
val = raw_input("Make a box?")
if len(val)>0:
if val[0] == 'y' or val[0] == 'Y':
mk_saturation_plot.use_box(filter)
lower_mag,upper_mag,lower_diff,upper_diff = re.split('\s+',open('box' + filter,'r').readlines()[0])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + '/good.stars -t PSSC\
-c "(((SEx_' + mag + '>' + lower_mag + ') AND (SEx_' + mag + '<' + upper_mag + ')) AND (magdiff>' + lower_diff + ')) AND (magdiff<' + upper_diff + ');"\
-o ' + search_params['TEMPDIR'] + '/filt.mag.new.cat',[search_params['TEMPDIR'] + '/filt.mag.new.cat'])
raw_input()
os.system('mv ' + search_params['TEMPDIR'] + '/filt.mag.new.cat ' + search_params['TEMPDIR'] + '/good.stars')
#val = []
#val = raw_input("Look at the saturation plot?")
#if len(val)>0:
# if val[0] == 'y' or val[0] == 'Y':
# mk_saturation_plot.mk_saturation('' + search_params['TEMPDIR'] + 'mk_sat' + ppid,filter)
# make stellar saturation plot
#lower_mag,upper_mag,lower_diff,upper_diff = re.split('\s+',open('box' + filter,'r').readlines()[0])
lower_mag = str(10)
upper_mag = str(14.0)
lower_diff = str(5)
upper_diff = str(9)
if type == 'star':
lower_mag = str(13.2)
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'good.stars -t PSSC -k SEx_Xpos_ABS SEx_Ypos_ABS > ' + search_params['TEMPDIR'] + 'positions',[search_params['TEMPDIR'] + 'positions'] )
utilities.run('ldacaddkey -i ' + search_params['TEMPDIR'] + 'good.stars -o ' + search_params['TEMPDIR'] + 'filt.airmass.cat -t PSSC -k AIRMASS 0.0 FLOAT "" ',[search_params['TEMPDIR'] + 'filt.airmass.cat'] )
            utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'filt.airmass.cat -o ' + search_params['TEMPDIR'] + 'filt.crit.cat -t PSSC\
            -c "(((magdiff>-900) AND (magdiff<900)) AND SEx_' + mag + '!=0);"',['' + search_params['TEMPDIR'] + 'filt.crit.cat'])
utilities.run('ldacfilter -i ' + search_params['TEMPDIR'] + 'filt.crit.cat -o ' + search_params['TEMPDIR'] + 'all.colors.cat -t PSSC\
-c "(((' + d['color1'] + '<900 AND ' + d['color2'] + '<900) AND ' + d['color1'] + '>-900) AND ' + d['color2'] + '>-900);"',['' + search_params['TEMPDIR'] + 'all.colors.cat'])
utilities.run('ldactoasc -b -q -i ' + search_params['TEMPDIR'] + 'all.colors.cat -t PSSC -k SEx_' + mag + ' ' + d['filter'] + 'mag ' + d['color1'] + ' ' + d['color2'] + ' AIRMASS SEx_' + magerr + ' ' + d['filter'] + 'err SEx_Xpos_ABS SEx_Ypos_ABS > ' + search_params['TEMPDIR'] + 'input.asc' ,['' + search_params['TEMPDIR'] + 'input.asc'] )
import photo_abs_new
good = photo_abs_new.run_through('illumination',infile='' + search_params['TEMPDIR'] + 'input.asc',output='' + search_params['TEMPDIR'] + 'photo_res',extcoeff=d['color1'],sigmareject=6,step='STEP_1',bandcomp=d['filter'],color1which=d['color1'],color2which=d['color2'])
import astropy.io.fits as pyfits
cols = []
for key in ['corr_data','color1_good','color2_good','magErr_good','X_good','Y_good','airmass_good']:
cols.append(pyfits.Column(name=key, format='E',array=good[key]))
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
print cols
tbhu = pyfits.BinTableHDU.from_columns(cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='STDTAB'
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':search_params['cluster']}
outcat = path + 'PHOTOMETRY/ILLUMINATION/fit_' + im_type + '_' + search_params['SUPA'] + '_' + type + '.cat'
os.system('rm ' + outcat)
hdulist.writeto(outcat)
save_exposure({'fit_cat_' + im_type + '_' + type: outcat,'airmass_add':'yes'},SUPA,FLAT_TYPE)
save_fit(good['fits'],im_type,type,SUPA,FLAT_TYPE)
def nightrun():
    import MySQLdb, sys, os, re, time, utilities
    import astropy.io.fits as pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
keystop = ['PPRUN']
list = reduce(lambda x,y: x + ',' + y, keystop)
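    # note: reduce(lambda x,y: x + ',' + y, keystop) is equivalent to ','.join(keystop)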
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN!='KEY_N/A' GROUP BY PPRUN"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
dtop = {}
for i in range(len(keystop)):
dtop[keystop[i]] = line[i]
directory = 'run_' + dtop['PPRUN']
os.system('mkdir ' + os.environ['sne'] + '/plots/' + directory )
os.system('rm ' + os.environ['sne'] + '/plots/' + directory + '/*')
keys = ['cluster','ROTATION']
list = reduce(lambda x,y: x + ',' + y, keys)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN='" + dtop['PPRUN'] + "' GROUP BY cluster,ROTATION"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
d = {}
for i in range(len(keys)):
d[keys[i]] = line[i]
if 1:
#print d
if 1:
crit = reduce(lambda x,y: x + ' AND ' + y,[str(y) + "='" + str(d[y]) + "'" for y in keys])
file = directory + '/' + reduce(lambda x,y: x + 'AND' + y,[str(y)[0:4] + "_" + str(d[y]) for y in keys])
#print crit
command = "SELECT * from illumination_db where zp_star_ is not null and " + crit
#print command
c.execute(command)
results = c.fetchall()
#print results
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[db_keys[i]] = results[j][i]
#print dict['SUPA'], dict['cluster'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
#print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
#print dict.keys()
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
#print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
def auto_print():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
keys = ['FILTER','ROTATION']
list = reduce(lambda x,y: x + ',' + y, keys)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN!='KEY_N/A' and good_stars_star_ > 400 GROUP BY "+list
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
h = []
for line in results:
d = {}
for i in range(len(keys)):
d[keys[i]] = line[i]
if 1:
print d
if 1:
crit = reduce(lambda x,y: x + ' AND ' + y,[str(y) + "='" + str(d[y]) + "'" for y in keys])
file = 'filt_' + reduce(lambda x,y: x + 'AND' + y,[str(y)[0:4] + "_" + str(d[y]) for y in keys])
print crit
command = "SELECT * from illumination_db where zp_star_ is not null and " + crit
print command
c.execute(command)
results = c.fetchall()
print results
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[db_keys[i]] = results[j][i]
print dict['SUPA'], dict['cluster'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
print dict.keys()
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
def describe_db(c,db='illumination_db'):
    command = "DESCRIBE " + db
print command
c.execute(command)
results = c.fetchall()
keys = []
for line in results:
keys.append(line[0])
return keys
def printer():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
if 1: #for set in [{'cluster':'HDFN', 'filters':['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']},{'cluster':'MACS2243-09', 'filters':['W-J-V','W-C-RC','W-C-IC','W-S-Z+']},{'cluster':'A2219', 'filters':['W-J-B','W-J-V','W-C-RC']}]:
#cluster = set['cluster']
if 1: #for filter in set['filters']:
if 1: #try:
                keys = describe_db(c)
                print keys
cluster = 'HDFN'
filter = 'W-C-ICSF'
ROTATION = 1
command = "select * from illumination_db where cluster='" + cluster + "' and filter='" + filter + "' and fit_cat_galaxy is not null and crfixed='third' and good_stars_star is not null and good_stars_star>10 and ROTATION=" + str(ROTATION)
command = "select * from illumination_db where SUPA='SUPA0011022' and zp_err_galaxy_D is not null"
#command = "select * from illumination_db where cluster='" + cluster + "' and filter='" + filter + "' and fit_cat_galaxy is not null and crfixed='third' and ROTATION=" + str(ROTATION) + ' and good_stars_star is not null and good_stars_star>10'
command = "SELECT * from illumination_db where zp_star_ is not null and ROTATION='0'" # where cluster='HDFN' and filter='W-J-V' and ROTATION=0"
print command
c.execute(command)
results = c.fetchall()
fit_files = []
for j in range(len(results)):
dict = {}
for i in range(len(results[j])):
dict[keys[i]] = results[j][i]
print dict['SUPA'], dict['cluster'], dict['pasted_cat'], dict['matched_cat_star']
fit_files.append(dict['fit_cat__star'])
print fit_files
dict = get_files(dict['SUPA'],dict['FLAT_TYPE'])
print dict.keys()
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
from copy import copy
import photo_abs_new
reload(photo_abs_new)
files = reduce(lambda x,y: x + ' ' + y,fit_files)
print files
tempfile = '' + search_params['TEMPDIR'] + 'spit'
command = 'ldacpaste -i ' + files + ' -t STDTAB -o ' + tempfile
print command
utilities.run(command)
hdulist = pyfits.open(tempfile)
args = {}
for column in hdulist["STDTAB"].columns:
args[column.name] = hdulist["STDTAB"].data.field(column.name)
file = cluster + '_' + filter + '_' + str(ROTATION)
file = raw_input('filename?')
photo_abs_new.calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'], 1000, args['corr_data'], args['airmass_good'], args['color1_good'], args['color2_good'], args['magErr_good'], args['X_good'], args['Y_good'],rot=0)
#except: print 'failed'
#filter = 'W-C-IC'
import pickle
#filters = ['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']
#for filter in filters:
# exposures_zero = {}
# exposures_one = {}
# print '$$$$$'
# print 'separating into different camera rotations'
# for exposure in exposures.keys():
# print exposure,exposures[exposure]['keywords']['ROTATION']
# if int(exposures[exposure]['keywords']['ROTATION']) == 1:
# exposures_one[exposure] = exposures[exposure]
# if int(exposures[exposure]['keywords']['ROTATION']) == 0:
# exposures_zero[exposure] = exposures[exposure]
if 0:
reopen = 0
save = 0
if reopen:
f = open('' + search_params['TEMPDIR'] + 'tmppickle' + cluster + filter,'r')
m = pickle.Unpickler(f)
exposures, LENGTH1, LENGTH2 = m.load()
print image.latest
if 1: images = gather_exposures(filter,cluster)
print images
''' strip down exposure list '''
for key in exposures.keys():
print exposures[key]['images']
for image in exposures:
if 1: image.find_seeing(exposures) # save seeing info?
if 1: image.sextract(exposures)
if 1: image.match_simple(exposures,cluster)
if 1: image.phot(exposures,filter,type,LENGTH1,LENGTH2)
if save:
f = open('' + search_params['TEMPDIR'] + 'tmppickle' + cluster + filter,'w')
m = pickle.Pickler(f)
pickle.dump([exposures,LENGTH1,LENGTH2],m)
f.close()
def match_cluster():
import MySQLdb, sys, os, re, time, utilities, pyfits
from copy import copy
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
db_keys = describe_db(c)
keystop = ['PPRUN','ROTATION','cluster']
list = reduce(lambda x,y: x + ',' + y, keystop)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and PPRUN='2003-07-27_W-J-B' and OBJECT='MJ2129' GROUP BY cluster,ROTATION"
print command
c.execute(command)
results=c.fetchall()
for line in results:
dtop = {}
for i in range(len(keystop)):
dtop[keystop[i]] = str(line[i])
keys = ['SUPA','cluster','ROTATION','PPRUN','pasted_cat']
list = reduce(lambda x,y: x + ',' + y, keys)
command="SELECT " + list + " from illumination_db where zp_star_ is not null and cluster='"+dtop['cluster'] + "' and PPRUN='" + dtop['PPRUN'] + "'"#+ "' GROUP BY cluster,ROTATION"
print command
c.execute(command)
results=c.fetchall()
db_keys = describe_db(c)
field = []
info = []
for line in results:
d = {}
for i in range(len(keys)):
d[keys[i]] = str(line[i])
key = str(int(float(d['ROTATION']))) + '#' + d['SUPA'] + '#'
field.append({'key':key,'pasted_cat':d['pasted_cat']})
info.append([d['ROTATION'],d['SUPA']])
print field
a = raw_input('match?')
if a[0] == 'y':
match_many([[x['pasted_cat'],x['key']] for x in field])
print info
raw_input()
script = reduce(lambda x,y: x + ' ' + y,[x['pasted_cat'] + ' ' + x['key'] for x in field])
print '\n\nDONE'
raw_input()
def make_ssc_config(list):
ofile = '/tmp/tmp.cat'
out = open('/tmp/tmp.ssc','w')
import os, string, re
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1:
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = prefix + key
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def make_ssc_config_few(list):
ofile = '/tmp/tmp.cat'
out = open('/tmp/tmp.ssc','w')
import os, string, re
key_list = ['MAG_APER2','MAGERR_APER2','Xpos_ABS','Ypos_ABS','CLASS_STAR','MaxVal','BackGr']
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1 :
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = prefix + key
if reduce(lambda x,y: x+ y, [string.find(out_key,k)!=-1 for k in key_list]):
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def make_ssc_config_colors(list):
ofile = '/tmp/tmp.cat'
out = open('/tmp/tmp.ssc','w')
import os, string, re
keys = []
i = -1
for file_name,prefix in list:
i += 1
print file_name
os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile)
file = open(ofile,'r').readlines()
for line in file:
if string.find(line,"Key name") != -1:
red = re.split('\.+',line)
key = red[1].replace(' ','').replace('\n','')
out_key = key + '_' + prefix
out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n")
#print key
keys.append(key)
out.close()
def threesec():
list = [['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105807_W-C-RC_2009-01-23_CALIB_0.0.cat','W-C-RC'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0105787_W-J-V_2009-01-23_CALIB_0.0.cat','W-J-V'],['/nfs/slac/g/ki/ki05/anja/SUBARU/MACS0417-11/PHOTOMETRY/ILLUMINATION/pasted_SUPA0050786_W-C-IC_2006-12-21_CALIB_0.0.cat','W-C-IC']]
match_many(list)
def match_many(list):
#make_ssc_config_colors(list)
make_ssc_config_few(list)
import os
files = []
for file,prefix in list:
print file
command = 'ldacaddkey -i %(inputcat)s -t OBJECTS -o %(outputcat)s -k A_WCS_assoc 0.0003 FLOAT "" \
B_WCS_assoc 0.0003 FLOAT "" \
Theta_assoc 0.0 FLOAT "" \
Flag_assoc 0 SHORT "" ' % {'inputcat':file,'outputcat':file + '.assoc1'}
os.system(command)
#command = 'ldacrenkey -i %(inputcat)s -o %(outputcat)s -k ALPHA_J2000 Ra DELTA_J2000 Dec' % {'inputcat':file + '.assoc1','outputcat':file+'.assoc2'}
#os.system(command)
files.append(file+'.assoc1')
files_input = reduce(lambda x,y:x + ' ' + y,files)
files_output = reduce(lambda x,y:x + ' ' + y,[z+'.assd' for z in files])
print files
print files_input, files_output
command = 'associate -i %(inputcats)s -o %(outputcats)s -t OBJECTS -c ./photconf/fullphotom.alpha.associate' % {'inputcats':files_input,'outputcats':files_output}
print command
os.system(command)
outputcat = '/tmp/final.cat'
command = 'make_ssc -i %(inputcats)s \
-o %(outputcat)s\
-t OBJECTS -c /tmp/tmp.ssc ' % {'inputcats':files_output,'outputcat':outputcat}
os.system(command)
def match_inside(SUPA1,SUPA2,FLAT_TYPE):
dict1 = get_files(SUPA1,FLAT_TYPE)
search_params1 = initialize(dict1['filter'],dict1['cluster'])
search_params1.update(dict1)
dict2 = get_files(SUPA2,FLAT_TYPE)
search_params2 = initialize(dict2['filter'],dict2['cluster'])
search_params2.update(dict2)
import os
path='/nfs/slac/g/ki/ki05/anja/SUBARU/%(cluster)s/' % {'cluster':search_params1['cluster']}
illum_path='/nfs/slac/g/ki/ki05/anja/SUBARU/ILLUMINATION/' % {'cluster':search_params1['cluster']}
#os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/')
os.system('mkdir -p ' + path + 'PHOTOMETRY/ILLUMINATION/SELF/')
from glob import glob
catalog1 = search_params1['pasted_cat']
catalog2 = search_params2['pasted_cat']
#os.system('ldacrentab -i ' + catalog2 + ' -t OBJECTS STDTAB -o ' + catalog2.replace('cat','std.cat'))
filter = search_params1['filter'] #exposures[exposure]['keywords']['filter']
OBJECT = search_params1['OBJECT'] #exposures[exposure]['keywords']['OBJECT']
outcat = path + 'PHOTOMETRY/ILLUMINATION/SELF/matched_' + SUPA1 + '_' + filter + '_' + '_self.cat'
file = 'matched_' + SUPA1 + '.cat'
os.system('rm ' + outcat)
command = 'match_simple_cats.sh ' + catalog1 + ' ' + catalog2 + ' ' + outcat
print command
os.system(command)
save_exposure({'matched_cat_self':outcat},SUPA1,FLAT_TYPE)
print outcat
def getTableInfo():
import astropy.io.fits as pyfits, sys, os, re, string, copy , string
p = pyfits.open('/tmp/final.cat')
tbdata = p[1].data
types = []
ROTS = {}
KEYS = {}
for column in p[1].columns:
if string.find(column.name,'#') != -1:
print column
res = re.split('\#',column.name)
ROT = res[0]
IMAGE = res[1]
KEY = res[2]
if not ROTS.has_key(ROT):
ROTS[ROT] = []
if not len(filter(lambda x:x==IMAGE,ROTS[ROT])):
ROTS[ROT].append(IMAGE)
return ROTS
def diffCalcNew():
import astropy.io.fits as pyfits, sys, os, re, string, copy , string
p = pyfits.open('/tmp/final.cat')
tbdata = p[1].data
types = []
ROTS = {}
KEYS = {}
for column in p[1].columns:
if string.find(column.name,'#') != -1:
print column
res = re.split('\#',column.name)
ROT = res[0]
IMAGE = res[1]
KEY = res[2]
if not ROTS.has_key(ROT):
ROTS[ROT] = []
if not len(filter(lambda x:x==IMAGE,ROTS[ROT])):
ROTS[ROT].append(IMAGE)
print ROTS
raw_input()
#good = 0
#for i in range(len(tbdata)):
# array = []
# for y in ROTS[ROT]:
# array += [tbdata.field(ROT+'#'+y+'#CLASS_STAR')[i] for y in ROTS[ROT]]
# array.sort()
# if array[-1]>0.9 and array[-2]>0.9:
# good += 1
#print good, len(tbdata)
#raw_input()
def selectGoodStars(EXPS):
    ''' keep measurements that are unsaturated (MaxVal + BackGr < 27500), star-like (CLASS_STAR > 0.5) and measured (MAG_APER2 < 40); a star is accepted when its two brightest magnitudes agree to within 0.2 mag and it has more than one usable measurement '''
import astropy.io.fits as pyfits, sys, os, re, string, copy , string, scipy
p = pyfits.open('/tmp/final.cat')
table = p[1].data
star_good = [] #= scipy.zeros(len(table))
supas = []
for i in range(len(table)):
mags_ok = False
class_star_array = []
include_star = []
name = []
for ROT in EXPS.keys():
mags_array = []
class_star_array += [table.field(ROT+'#'+y+'#CLASS_STAR')[i] for y in EXPS[ROT]]
mags_array += [table.field(ROT+'#'+y+'#MAG_APER2')[i] for y in EXPS[ROT]]
include_star += [((table.field(ROT+'#'+y+'#MaxVal')[i] + table.field(ROT+'#'+y+'#BackGr')[i]) < 27500 and table.field(ROT+'#'+y+'#CLASS_STAR')[i] > 0.5) and table.field(ROT+'#'+y+'#MAG_APER2')[i] < 40 for y in EXPS[ROT]]
name += [{'name':EXPS[ROT][z],'rotation':ROT} for z in range(len(EXPS[ROT]))]
mags_array.sort()
if len(mags_array) > 1:
if abs(mags_array[0] - mags_array[1]) < 0.2:
mags_ok = True
class_star_array.sort()
if mags_ok:
file_list=[]
for j in range(len(include_star)):
if include_star[j]:
file_list.append(name[j])
if len(file_list) > 1:
star_good.append(i)
supas.append({'table index':i,'supa files':file_list})
#print len(supas)
if i%100==0: print i
return star_good, supas
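# Illustration only (this helper is an assumption): the per-measurement
# inclusion test used inline in selectGoodStars(), as a standalone predicate.
# The 27500 ADU saturation level and the CLASS_STAR and magnitude cuts mirror
# the expressions above.
def is_usable_detection(maxval, backgr, class_star, mag, sat_level=27500):
    unsaturated = (maxval + backgr) < sat_level
    star_like = class_star > 0.5
    measured = mag < 40
    return unsaturated and star_like and measured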
def diffCalc(SUPA1,FLAT_TYPE):
dict = get_files(SUPA1,FLAT_TYPE)
search_params = initialize(dict['filter'],dict['cluster'])
search_params.update(dict)
import astropy.io.fits as pyfits, sys, os, re, string, copy
print search_params['matched_cat_self']
p = pyfits.open(search_params['matched_cat_self'])
tbdata = p[1].data
mask = tbdata.field('SEx_MaxVal') + tbdata.field('SEx_BackGr') < 27500
newtbdata = tbdata[mask]
print len(newtbdata)
mask = newtbdata.field('CLASS_STAR') > 0.95
newtbdata = newtbdata[mask]
mask = abs(newtbdata.field('SEx_MAG_APER2') - newtbdata.field('MAG_APER2')) < 0.01
new2tbdata = newtbdata[mask]
print len(new2tbdata)
data = new2tbdata.field('SEx_MAG_APER2') - new2tbdata.field('MAG_APER2')
magErr = new2tbdata.field('SEx_MAGERR_APER2')
X = new2tbdata.field('Xpos_ABS')
Y = new2tbdata.field('Ypos_ABS')
file = 'test'
calcDataIllum(file,search_params['LENGTH1'], search_params['LENGTH2'],data,magErr,X,Y)
data_save = []
magErr_save = []
X_save = []
Y_save = []
for i in range(len(data)):
data_save.append([new2tbdata.field('SEx_MAG_APER2')[i],new2tbdata.field('MAG_APER2')[i]])
magErr_save.append([new2tbdata.field('SEx_MAGERR_APER2')[i],new2tbdata.field('MAGERR_APER2')[i]])
X_save.append([new2tbdata.field('Xpos_ABS')[i],new2tbdata.field('SEx_Xpos_ABS')[i]])
Y_save.append([new2tbdata.field('Ypos_ABS')[i],new2tbdata.field('SEx_Ypos_ABS')[i]])
return data_save, magErr_save, X_save, Y_save
def calcDataIllum(file, LENGTH1, LENGTH2, data,magErr, X, Y, rot=0):
import numpy, math, pyfits, os
from ppgplot import *
#print size_x, size_y, bin, size_x/bin
x = []
y = []
z = []
zerr = []
from copy import copy
X_sort = copy(X)
Y_sort = copy(Y)
X_sort = numpy.sort(X_sort)
Y_sort = numpy.sort(Y_sort)
X_min = X_sort[0]
Y_min = Y_sort[0]
X_max = X_sort[-1]
Y_max = Y_sort[-1]
X_width = abs(X_max - X_min)
Y_width = abs(Y_max - Y_min)
nbin1 =10
nbin2 =10
LENGTH1 = LENGTH1
LENGTH2 = LENGTH2
print LENGTH1, LENGTH2
#raw_input()
bin1 = int(LENGTH1/nbin1)
bin2 = int(LENGTH2/nbin2)
diff_weightsum = -9999*numpy.ones([nbin1,nbin2])
diff_invvar = -9999*numpy.ones([nbin1,nbin2])
X_cen = []
Y_cen = []
data_cen = []
zerr_cen = []
chisq = 0
for i in range(len(data)):
if 1: # LENGTH1*0.3 < X[i] < LENGTH1*0.6:
X_cen.append(X[i])
Y_cen.append(Y[i])
data_cen.append(data[i])
zerr_cen.append(magErr[i])
x.append(X[i])
y.append(Y[i])
z.append(data[i])
zerr.append(magErr[i])
chisq += data[i]**2./magErr[i]**2.
x_val = int((X[i])/float(bin1)) # + size_x/(2*bin)
y_val = int((Y[i])/float(bin2)) #+ size_y/(2*bin)
#print LENGTH1, LENGTH2, x_val, y_val, X[i], Y[i]
#raw_input()
#print size_x/bin+1,size_y/bin+1, x_val, y_val, X[i], Y[i]
err = magErr[i]
''' lower limit on error '''
if err < 0.04: err = 0.04
weightsum = data[i]/err**2.
invvar = 1/err**2.
#if 1: #0 <= x_val and x_val < int(nbin1) and y_val >= 0 and y_val < int(nbin2): #0 < x_val < size_x/bin and 0 < y_val < size_y/bin:
#print x_val, y_val
try:
if diff_weightsum[x_val][y_val] == -9999:
diff_weightsum[x_val][y_val] = weightsum
diff_invvar[x_val][y_val] = invvar
else:
diff_weightsum[x_val][y_val] += weightsum
diff_invvar[x_val][y_val] += invvar
except: print 'fail'
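    # note: the conventional reduced chi-square is chisq / (N - k) for k fit
    # parameters; the quantity computed below is sqrt(chisq) / N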
redchisq = chisq**0.5 / len(data)
print 'redchisq', redchisq
#raw_input()
import Numeric
x_p = Numeric.array(X_cen)
y_p = Numeric.array(Y_cen)
z_p = Numeric.array(data_cen)
zerr_p = Numeric.array(zerr_cen)
x.sort()
y.sort()
z.sort()
mean = diff_weightsum/diff_invvar
print 'mean'
#print mean
err = 1/diff_invvar**0.5
print 'err'
#print err
print 'writing'
hdu = pyfits.PrimaryHDU(mean)
pth = '/nfs/slac/g/ki/ki04/pkelly/plots/'
f = pth + file
os.system('rm ' + f + 'diffmap.fits')
hdu.writeto( f + 'diffmap.fits')
hdu = pyfits.PrimaryHDU(err)
os.system('rm ' + f + 'diffinvar.fits')
hdu.writeto( f + 'diffinvar.fits')
pgbeg(f + 'pos.ps'+"/cps",1,1)
pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
### plot positions
pgpanl(1,1)
pgswin(x[0],x[-1],y[0],y[-1])
pgbox()
pglab('X','Y',file) # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
pgpt(x_p,y_p,3)
pgend()
### plot residuals
pgbeg(f + 'diff.ps'+"/cps",1,2)
pgiden()
#print x_p
#print z_p
#print zerr_p
#pgswin(x[0],x[-1],z[0],z[-1])
pgpanl(1,1)
pgswin(x[0],x[-1],-0.005,0.005)
pgbox()
pglab('X axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
#pgerrb(6,x_p,z_p,zerr_p)
pgpt(x_p,z_p,3)
#pgswin(y[0],y[-1],z[0],z[-1])
pgpanl(1,2)
pgswin(y[0],y[-1],-0.005,0.005)
pgsci(1)
pgbox()
pglab('Y axis','SDSS-SUBARU',file) # label the plot
#pgsci(3)
#pgerrb(6,y_p,z_p,zerr_p)
pgpt(y_p,z_p,3)
pgsci(1)
#print x_p
#print z_p
#print zerr_p
pgend()
return
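# Minimal sketch (illustration, assuming numpy; not part of the original file)
# of the inverse-variance binning performed in calcDataIllum(): each cell
# accumulates sum(d_i / sigma_i**2) and sum(1 / sigma_i**2); the weighted mean
# is their ratio and its error is ivar**-0.5 (empty cells give nan/inf).
def binned_weighted_mean(x, y, data, err, nbin, length):
    import numpy
    wsum = numpy.zeros((nbin, nbin))
    ivar = numpy.zeros((nbin, nbin))
    step = float(length) / nbin
    for xi, yi, di, si in zip(x, y, data, err):
        i, j = int(xi / step), int(yi / step)
        if 0 <= i < nbin and 0 <= j < nbin:
            wsum[i, j] += di / si**2
            ivar[i, j] += 1.0 / si**2
    return wsum / ivar, ivar**-0.5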
def make_model(ROTS):
#polyterms = [['X','X','X'],['X','X','Y'],['X','Y','Y'],['Y','Y','Y'],['X','X'],['X','Y'],['Y','Y'],['X'],['Y']]
polyterms = [['Xpos_ABS','Xpos_ABS'],['Xpos_ABS','Ypos_ABS'],['Ypos_ABS','Ypos_ABS'],['Xpos_ABS'],['Ypos_ABS']]
''' break up parameters into rotation specific and exposure specific (the zeropoints) '''
model = {'ROT_SPECIFIC':[],'EXP_SPECIFIC':[]}
for ROTATION in ROTS.keys():
for term in polyterms:
name = reduce(lambda x,y: x + 'T' + y,term)
model['ROT_SPECIFIC'].append({'name':ROTATION+'#'+name,'rotation':ROTATION,'term':term,'value':0.1})
for IMAGE in ROTS[ROTATION]:
model['EXP_SPECIFIC'].append({'name':IMAGE+'#zp','image':IMAGE,'term':['zp'],'value':0.01})
fit = {'model':model,'fixed':[],'apply':[]}
print fit
return fit
def calc_model(p, smodel, valdict, y, err):
    # evaluate the polynomial model for parameter vector p and return an
    # mpfit-style [status, normalized residuals] pair
    model = 0
    for i in range(len(smodel)):
        term = smodel[i]
        model += p[i] * reduce(lambda a, b: a * b, [valdict[z] for z in term])
    status = 0
    return([status, (model - y) / err])
class phot_funct:
def __init__(self,inputmodel,sfixed,EXPS,star_good,sapply=[],zps=0):
''' need to take EXPS and make a vector of parameters to pass to the fitting program as well as a dictionary '''
self.star_good = star_good
self.inputmodel = inputmodel
self.allterms = self.inputmodel['ROT_SPECIFIC'] + self.inputmodel['EXP_SPECIFIC']
self.parstart = [{'value':x['value'],'fixed':0.001} for x in self.allterms] # assign initial values to all parameters
self.pardict = {}
for x in range(len(self.allterms)):
self.pardict[self.allterms[x]['name']] = x
#self.pardict = [{self.allterms[x]['name']:x} for x in range(len(self.allterms))] # dictionary of parameter indicies for parameter names
self.model = [x['term'] for x in self.allterms] # make a list of the form of each term
print 'HERE'
print self.allterms
self.EXPS = EXPS
#self.p_dict = []
#self.smodeldict = {}
#for x in self.sinputmodel:
# self.smodeldict[x['name']] = x['term']
self.sfixed = sfixed
self.sapply = sapply
self.fitvars = {}
#fa = {"y": data, "err": err, 'X':X, 'Y':Y, 'maxVal':maxVal, 'classStar':classStar}
def calc_model(self, p, fjac=None, table=None):
# function you can pass to mpfit
self.dict = {'zp':1, 'table':table}
#print p
redchisqs = []
rows = len(table)
print rows
row_num = 0
for j in self.star_good:
row_num += 1
data = []
errs = []
models = []
numerators = []
denominators = []
for ROT in self.EXPS:
good_exps = []
for exp in self.EXPS[ROT]:
#print exp
if table.field(ROT+'#'+exp+'#MaxVal')[j] + table.field(ROT+'#'+exp+'#BackGr')[j] < 27500 and table.field(ROT+'#'+exp+'#CLASS_STAR')[j] > 0.9:
good_exps.append(exp)
#print good_exps, self.EXPS[ROT]
#raw_input()
#print good_stars, X[j], Y[j], y[j], maxVal[j], classStar[j]
#raw_input()
if len(good_exps) > 0:
tot = len(good_exps)
import scipy
#models = scipy.zeros(tot)
#numerators = scipy.zeros(tot)
#denominators = scipy.zeros(tot)
for exp in good_exps:
#print self.allterms
model_zp_terms = []
model_position_terms = []
for term in self.allterms:
if term.has_key('image'):
if term['image'] == exp:
model_zp_terms.append(term)
if term.has_key('rotation'):
#print term['rotation'], ROT, str(term['rotation']) == str(ROT)
if str(term['rotation']) == str(ROT):
model_position_terms.append(term)
#print model_zp_terms, model_position_terms
model = 0
''' add positionally depdendent terms '''
for term in model_position_terms:
#print table.field(ROT+'#'+exp+'#'+term['term'][0])[j]
#print self.pardict[term['name']]
model += p[self.pardict[term['name']]] * reduce(lambda x,y: x * y,[table.field(ROT+'#'+exp+'#'+z)[j] for z in term['term']])
''' add the zeropoint for that image '''
for term in model_zp_terms:
#print self.pardict[term['name']]
model += p[self.pardict[term['name']]]
data.append(table.field(ROT+'#'+exp+'#MAG_APER2')[j]**2.)
errs.append(table.field(ROT+'#'+exp+'#MAGERR_APER2')[j]**2.)
models.append(model)
numerators.append((model-table.field(ROT+'#'+exp+'#MAG_APER2')[j])/table.field(ROT+'#'+exp+'#MAGERR_APER2')[j]**2.)
denominators.append(1./table.field(ROT+'#'+exp+'#MAGERR_APER2')[j]**2.)
if len(data)>0:
''' we have already subtracted the image-dependent zeropoint so we just need to subtract the instrinsic magnitude of the star, which we get from an average '''
average = reduce(lambda x,y: x + y,numerators) / reduce(lambda x,y: x + y, denominators)
#print average
chisq = 0
for k in range(len(data)):
chisq += abs(models[k] - data[k] - average) / errs[k]
#print chisq
#print models[k], y[j][k], average, err[j][k]
redchisq = chisq/float(len(data))
#ydiff = y[j]['0'][0] - y[j]['0'][1]
#moddiff = models[0] - models[1]
if 0: #abs(moddiff - ydiff) < 0.001:
print X[j]
print Y[j]
print y[j]
print err[j]
print models
print 'moddiff', models[0] - models[1]
print 'y diff', y[j][0] - y[j][1]
print chisq
print redchisq
raw_input()
redchisqs.append(redchisq)
#redchisqs.append(abs(moddiff-ydiff)/err[j][0])
if row_num%500 == 0: print j
status = 0
import Numeric
redchisqs = Numeric.array(redchisqs)
#print redchisqs
return([status,redchisqs ])
    def calc_sigma(self, p, fjac=None, y=None, err=None, X=None, Y=None, airmass=None, color1=None, color2=None):
        # function you can pass to mpfit
        self.dict = {'zp':1., 'color1':color1, 'color2':color2, 'airmass':airmass, 'X':X, 'Y':Y}
        model = 0
        for i in range(len(self.model)):
            term = self.model[i]
            #print term
            model += p[i] * reduce(lambda x,y: x * y,[self.dict[z] for z in term])
        status = 0
        return([model, (model-y)/err])
def calcIllum(size_x, size_y, bin, fit):
import numpy, math, pyfits, os
fitvars = fit['class'].fitvars
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
#epsilon = fitvars['X']*x + fitvars['Y']*y + fitvars['XTX']*x**2 + fitvars['YTY']*y**2 + fitvars['XTY']*x*y + fitvars['XTYTY']*x*y*y + fitvars['XTXTY']*x*x*y + fitvars['XTXTX']*x*x*x + fitvars['YTYTY']*y*y*y
    epsilon = fitvars['X']*x + fitvars['Y']*y + fitvars['XTX']*x**2 + fitvars['YTY']*y**2 + fitvars['XTY']*x*y
#correction = 10.**(epsilon/2.5)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
os.system('rm /tmp/correction.fits')
hdu.writeto('/tmp/correction.fits')
print 'done'
return
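# Illustration (assumption): the fitted epsilon map is a magnitude offset;
# the commented-out line above converts it to a multiplicative flux
# correction via the standard mag/flux relation.
def epsilon_to_flux_correction(epsilon):
    return 10.0**(epsilon / 2.5)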
def random_cmp(x,y):
import random
a = random.random()
b = random.random()
if a > b: return 1
else: return -1
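# note: sorting with a random comparator, as linear_fit() does below, is a
# makeshift shuffle; random.shuffle(l) is the direct equivalent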
def linear_fit():
maxSigIter=50
solutions = []
import pickle
''' get data '''
EXPS = getTableInfo()
print EXPS
#ROTS, data, err, X, Y, maxVal, classStar = diffCalcNew()
#save = {'ROTS': ROTS, 'data':data,'err':err,'X':X,'Y':Y,'maxVal':maxVal,'classStar':classStar}
#uu = open('/tmp/store','w')
#import pickle
#pickle.dump(save,uu)
#uu.close()
''' EXPS has all of the image information for different rotations '''
''' make model '''
#fit = make_model(EXPS)
#position_fit = make_position_model(EXPS)
    #print fit
if 1:
star_good,supas = selectGoodStars(EXPS)
uu = open('/tmp/store','w')
import pickle
pickle.dump({'star_good':star_good,'supas':supas},uu)
uu.close()
import pickle
f=open('/tmp/store','r')
m=pickle.Unpickler(f)
d=m.load()
star_good = d['star_good']
supas = d['supas']
print len(star_good)
if 0:
l = range(len(star_good))
print l[0:10]
l.sort(random_cmp)
print l[0:10]
''' shorten star_good, supas '''
star_good = [star_good[i] for i in l[0:800]]
supas = [supas[i] for i in l[0:800]]
print len(star_good)
print EXPS
print EXPS.keys(), EXPS[EXPS.keys()[0]]
print len(supas), len(star_good)
print supas[0:10]
columns = []
column_dict = {}
''' position-dependent terms in design matrix '''
position_columns = []
index = -1
#position_fit = [['Xpos_ABS','Xpos_ABS'],['Xpos_ABS','Ypos_ABS'],['Ypos_ABS','Ypos_ABS'],['Xpos_ABS'],['Ypos_ABS']]
#cheby_fit = [{'f':lambda x,y:x,'name':'T1X'},{'f':lambda x,y:2*x**2.-1,'name':'T2X'},{'f':lambda x,y:4*x**3.-3*x,'name':'T3X'},{'f':lambda x,y:y,'name':'T1Y'},{'f':lambda x,y:2*y**2.-1,'name':'T2Y'},{'f':lambda x,y:4*y**3.-3*y,'name':'T3Y'}]
#cheby_fit = [{'f':lambda x,y:x,'name':'T1X'},{'f':lambda x,y:2*x**2.-1,'name':'T2X'},{'f':lambda x,y:y,'name':'T1Y'},{'f':lambda x,y:2*y**2.-1,'name':'T2Y'}]
cheby_fit = [{'f':lambda x,y:x,'name':'T0'},{'f':lambda x,y:y,'name':'T1'},{'f':lambda x,y:2*x**2.-1,'name':'T2'},{'f':lambda x,y:2*y**2.-1,'name':'T3'},{'f':lambda x,y:(2*x**2.-1)*(2.*y**2.-1),'name':'T4'}]
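    # basis functions above, on coordinates rescaled to [-1, 1]: 'T0' and 'T1'
    # are first-order Chebyshev polynomials (T_1(x)=x, T_1(y)=y), 'T2' and 'T3'
    # are second order (T_2(u)=2u**2-1), and 'T4' is the separable product
    # T_2(x)*T_2(y); the constant term is degenerate with the per-image
    # zeropoints and is omitted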
for ROT in EXPS.keys():
for term in cheby_fit:
index += 1
name = str(ROT) + '#' + term['name'] # + reduce(lambda x,y: x + 'T' + y,term)
position_columns.append({'name':name,'term':term['f'],'rotation':ROT,'index':index})
print position_columns
''' zero point terms in design matrix '''
zp_columns = []
for ROT in EXPS.keys():
for exp in EXPS[ROT]:
zp_columns.append({'name':'zp_'+exp,'image':exp,'im_rotation':ROT})
print zp_columns
mag_columns = []
for star in supas:
mag_columns.append({'name':'mag_' + str(star['table index'])})
print mag_columns
''' total number of fit parameters summed over each rotation + total number of images of all rotations + total number of stars to fit '''
x_length = len(position_columns) + len(zp_columns) + len(mag_columns)
y_length = reduce(lambda x,y: x + y,[len(star['supa files']) for star in supas])
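    # each (star, exposure) measurement contributes one row; rows and data
    # values are divided by the measurement error, so solving A.x ~= B is an
    # inverse-variance weighted least-squares fit for
    # [illumination coefficients per rotation | per-image zeropoints |
    #  per-star magnitudes]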
print x_length, y_length
import scipy
from pysparse import spmatrix
A = scipy.zeros([y_length,x_length])
B = scipy.zeros(y_length)
#A = spmatrix.ll_mat(y_length,x_length)
Af = open('A','w')
#B = spmatrix.ll_mat(y_length)
#B = scipy.zeros(y_length)
Bf = open('b','w')
comp_mag = scipy.zeros(len(supas))
print y_length, x_length
import astropy.io.fits as pyfits
p = pyfits.open('/tmp/final.cat')
table = p[1].data
Bstr = ''
row_num = -1
supa_num = -1
degeneracy_break = {}
for ROT in EXPS.keys():
degeneracy_break[ROT] = False
#degeneracy_break = False
''' each star '''
for star in supas:
supa_num += 1
comp_mag[supa_num] = table.field(str(star['supa files'][0]['rotation']) + '#' + star['supa files'][0]['name'] + '#MAG_APER2')[star['table index']]
''' each exp of each star '''
for exp in star['supa files']:
row_num += 1
col_num = -1
rotation = exp['rotation']
sigma = table.field(str(rotation) + '#' + exp['name'] + '#MAGERR_APER2')[star['table index']]
#if sigma < 0.001: sigma = 0.001
sigma = sigma * 1000.
#print table.field(str(rotation) + '#' + exp['name'] + '#MAGERR_APER2')[star['table index']]
for c in position_columns:
col_num += 1
if c['rotation'] == rotation:
n = str(rotation) + '#' + exp['name'] + '#Xpos_ABS'
#term_cont = [str(rotation) + '#' + exp['name'] + '#' + par for par in c['term']]
#A[row_num][col_num] = reduce(lambda x,y: x * y,[table.field(z)[star['table index']] for z in term_cont])/sigma
coord_conv = lambda x:(2.*x-0-10000)/(10000-0)
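                        # maps detector coordinates in [0, 10000] onto [-1, 1],
                        # the natural domain of the Chebyshev basis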
#print c['term'],c['term'](1,1)
x = table.field(str(rotation) + '#' + exp['name'] + '#Xpos_ABS')[star['table index']]
y = table.field(str(rotation) + '#' + exp['name'] + '#Ypos_ABS')[star['table index']]
x = coord_conv(x)
y = coord_conv(y)
#print table.field(str(rotation) + '#' + exp['name'] + '#Xpos_ABS')[star['table index']], table.field(str(rotation) + '#' + exp['name'] + '#Ypos_ABS')[star['table index']]
#print x,y
#print c['term'](x,y)
#raw_input()
A[row_num,col_num] = c['term'](x,y)/sigma
Af.write(str(row_num) + ' ' + str(col_num) + ' ' + str(c['term'](x,y)/sigma) + '\n')
for c in zp_columns:
col_num += 1
#print c['image'], exp['name']
#if not degeneracy_break and c['image'] == exp['name']:
if not degeneracy_break[c['im_rotation']] and c['image'] == exp['name']:
degeneracy_break[c['im_rotation']] = True
A[row_num,col_num] = 1./sigma
Af.write(str(row_num) + ' ' + str(col_num) + ' ' + str(1./sigma) + '\n')
#print col_num
''' magnitude column '''
col_num += 1
#print supa_num, col_num, row_num, x_length, y_length
A[row_num,col_num + supa_num] = 1./sigma
Af.write(str(row_num) + ' ' + str(col_num + supa_num) + ' ' + str(1./sigma) + '\n')
B[row_num] = table.field(str(rotation) + '#' + exp['name'] + '#MAG_APER2')[star['table index']]/sigma
Bstr += str(table.field(str(rotation) + '#' + exp['name'] + '#MAG_APER2')[star['table index']]/sigma) + ' '
Bf.write(Bstr[:-1])
Bf.close()
Af.close()
raw_input()
print A[0,0:30], B[0:10], scipy.shape(A), scipy.shape(B)
Af = open('/tmp/B','w')
for i in range(len(B)):
Af.write(str(B[i]) + '\n')
Af.close()
import re, os
os.system('a.out < A')
bout = open('x','r').read()
res = re.split('\s+',bout[:-1])
U = [float(x) for x in res]
#from scipy import linalg
#print 'doing linear algebra'
#U = linalg.lstsq(A,B)
#print U[0][0:30]
raw_input()
if 0:
from pysparse.pysparseUmfpack import PysparseUmfpackSolver
from pysparse.pysparseMatrix import PysparseMatrix
from pysparse import spmatrix, precon, itsolvers
A = PysparseMatrix(matrix=A)
print 'initialized'
S = PysparseUmfpackSolver(A)
print 'initialized'
S.solve(B)
raw_input()
import numpy
x = numpy.empty(len(B))
print A.shape, B.shape, x.shape
raw_input()
Aprime = A.to_csr()
info, iter, relres = itsolvers.qmrs(Aprime,B,x,1e-12,2000)
print 'done with linear algebra'
raw_input()
    U = scipy.array(U)
    print len(U), len(position_columns), len(zp_columns)
    print scipy.shape(A), scipy.shape(U), scipy.shape(B)
    print 'fitted star magnitudes minus their first-exposure magnitudes:'
    print U[len(position_columns) + len(zp_columns):] - comp_mag
    print U[:len(position_columns) + len(zp_columns)]
position_fit = [['Xpos_ABS','Xpos_ABS'],['Xpos_ABS','Ypos_ABS'],['Ypos_ABS','Ypos_ABS'],['Xpos_ABS'],['Ypos_ABS']]
import re
for ROT in EXPS.keys():
print 'ROT', ROT
fitvars = {}
for ele in position_columns:
res = re.split('#',ele['name'])
if res[0] == ROT:
fitvars[ele['name'][2:]] = U[ele['index']]
print ele['name'], fitvars[ele['name'][2:]]
size_x=10000
size_y=10000
bin=100
import numpy, math, pyfits, os
x,y = numpy.meshgrid(numpy.arange(0,size_x,bin),numpy.arange(0,size_y,bin))
F=0.1
print 'calculating'
#epsilon = fitvars['Xpos_ABS']*x + fitvars['Ypos_ABS']*y + fitvars['Xpos_ABSTXpos_ABS']*x**2 + fitvars['Ypos_ABSTYpos_ABS']*y**2 + fitvars['Xpos_ABSTYpos_ABS']*x*y
coord_conv = lambda x:(2.*x-0-10000)/(10000-0)
#print c['term'],c['term'](1,1)
x = coord_conv(x)
y = coord_conv(y)
epsilon = fitvars['T0']*x + fitvars['T1']*y + fitvars['T2']*(2*x**2.-1) + fitvars['T3']*(2*y**2.-1) + fitvars['T4']*(2*x**2.-1)*(2.*y**2.-1)
#epsilon = fitvars['T1X']*x + fitvars['T2X']*(x**2.) + fitvars['T1Y']*y + fitvars['T2Y']*(y**2) + fitvars['T3Y']*(x*y)
#correction = 10.**(epsilon/2.5)
print 'writing'
hdu = pyfits.PrimaryHDU(epsilon)
os.system('rm /tmp/correction' + ROT + '.fits')
hdu.writeto('/tmp/correction' + ROT + '.fits')
print 'done'
return
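# Alternative solver sketch (assumption: SciPy's sparse machinery is
# available; the pipeline above shells out to an external solver via
# "a.out < A"). The same row-weighted system A.x ~= B can be solved
# in-process:
def solve_design_matrix(A, B):
    from scipy.sparse import csr_matrix
    from scipy.sparse.linalg import lsqr
    # lsqr returns a tuple whose first element is the least-squares solution
    return lsqr(csr_matrix(A), B)[0]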
def fit():
maxSigIter=50
solutions = []
import pickle
''' get data '''
EXPS = getTableInfo()
print EXPS
#ROTS, data, err, X, Y, maxVal, classStar = diffCalcNew()
#save = {'ROTS': ROTS, 'data':data,'err':err,'X':X,'Y':Y,'maxVal':maxVal,'classStar':classStar}
#uu = open('/tmp/store','w')
#import pickle
#pickle.dump(save,uu)
#uu.close()
''' EXPS has all of the image information for different rotations '''
''' make model '''
fit = make_model(EXPS)
print fit
    star_good, supas = selectGoodStars(EXPS)
uu = open('/tmp/store','w')
import pickle
pickle.dump(star_good,uu)
uu.close()
import pickle
f=open('/tmp/store','r')
m=pickle.Unpickler(f)
star_good=m.load()
fit['class'] = phot_funct(fit['model'],fit['fixed'],EXPS,star_good,fit['apply'])
import astropy.io.fits as pyfits
p = pyfits.open('/tmp/final.cat')
table = p[1].data
import copy
table_save = copy.copy(table)
for i in range(maxSigIter):
fa = {"table": table_save}
func = fit['class'].calc_model
#functkw takes input data arrays
#parinfo takes initial guess and constraints on parameters
#import optimize
#params, covar, info, mesg, ier = optimize.leastsq(func,guess,args = (points,vals,errs), full_output=True)
import mpfit
m = mpfit.mpfit(func, functkw=fa,
parinfo=fit['class'].parstart,
maxiter=1000, quiet=0)
print m.params, m.perror
if (m.status <= 0):
print 'error message = ', m.errmsg
break
print m.params,m.perror
#fits = [{'vars':['zp','color1coeff','color1coeff2'],'parinfo':[{'value':p[0],'fixed':0},{'value':p[1],'fixed':0},{'value':p[2],'fixed':0},'function':phot_funct_secondorder,'fit_type':'no_airmass'}]
fit['class'].fitvars = {}
        for ele in range(len(fit['class'].model)):
            print ele, fit['class'].model
            name = make_name(fit['class'].model[ele])
            print ele, fit['class'].fitvars, name, m.params[ele]
            fit['class'].fitvars[name] = m.params[ele]
            fit['class'].fitvars[name + '_err'] = m.perror[ele]
perror = copy.copy(m.perror)
# Compute a 3 sigma rejection criterion
print m.params, data_rec[0], data[0]
#condition, redchisq = SigmaCond(params, data_save, data,
# airmass_save, airmass,
# color1_save, color1, color2_save, color2, err_save, err, sigmareject)
calcIllum(10000, 10000, 100, fit)
if len(data_save) > 1:
(mo_save, reddm) = fit['class'].calc_sigma(m.params, airmass_save, color1_save, color2_save, data_save, err_save, X_save, Y_save)
#reddm = (data-mo)/err
redchisq = Numeric.sqrt(Numeric.sum(Numeric.power(reddm, 2)) / (len(reddm) - 1))
dm = data_save-mo_save
#dm_save = data_save - mo_save
print len(data_save), len(mo_save)
dm_save = data_save - mo_save
mean = Numeric.sum(dm)/len(dm)
sigma = Numeric.sqrt(Numeric.sum(Numeric.power(mean-dm, 2)) / (len(dm) - 1))
# you can pick either
#condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * sigma)
condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * err_save)
else:
condition = Numeric.zeros(len(data_save))
print redchisq
# Keep everything (from the full data set!) that is within
# the 3 sigma criterion
#data_sig = Numeric.compress(condition, data_save)
data = Numeric.compress(condition, data_rec)
err = Numeric.compress(condition, err_save)
X = Numeric.compress(condition, X_save)
Y = Numeric.compress(condition, Y_save)
new_len = len(data)
if float(new_len)/float(save_len) < 0.5:
print "Rejected more than 50% of all measurements."
print "Aborting this fit."
break
# No change
if new_len == old_len:
print "Converged! (%d iterations)" % (i+1, )
print "Kept %d/%d stars." % (new_len, save_len)
break
#print params, perror, condition
meanerr = Numeric.sum(err_save)/len(err_save)
def make_name(name):
if len(name) > 1:
name = reduce(lambda x,y: x + 'T' + y,name)
else:
name = name[0]
return name
|
deapplegate/wtgpipeline
|
non_essentials/s1.py
|
Python
|
mit
| 119,614
|
[
"Galaxy"
] |
e1391edaa1caba687b7ed4e9d8bcbfe21d0ffac02d8c15e8a2f298000d56c2b9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Qiming Sun <osirpt.sun@gmail.com>
# Susi Lehtola <susi.lehtola@gmail.com>
'''
XC functional, the interface to libxc
(http://www.tddft.org/programs/octopus/wiki/index.php/Libxc)
'''
import sys
import warnings
import copy
import ctypes
import math
import numpy
from pyscf import lib
from pyscf.dft.xc.utils import remove_dup, format_xc_code
from pyscf import __config__
_itrf = lib.load_library('libxc_itrf')
_itrf.LIBXC_is_lda.restype = ctypes.c_int
_itrf.LIBXC_is_gga.restype = ctypes.c_int
_itrf.LIBXC_is_meta_gga.restype = ctypes.c_int
_itrf.LIBXC_needs_laplacian.restype = ctypes.c_int
_itrf.LIBXC_needs_laplacian.argtypes = [ctypes.c_int]
_itrf.LIBXC_is_hybrid.restype = ctypes.c_int
_itrf.LIBXC_is_cam_rsh.restype = ctypes.c_int
_itrf.LIBXC_max_deriv_order.restype = ctypes.c_int
_itrf.LIBXC_number_of_functionals.restype = ctypes.c_int
_itrf.LIBXC_functional_numbers.argtypes = (numpy.ctypeslib.ndpointer(dtype=numpy.intc, ndim=1, flags=("W", "C", "A")), )
_itrf.LIBXC_functional_name.argtypes = [ctypes.c_int]
_itrf.LIBXC_functional_name.restype = ctypes.c_char_p
_itrf.LIBXC_hybrid_coeff.argtypes = [ctypes.c_int]
_itrf.LIBXC_hybrid_coeff.restype = ctypes.c_double
_itrf.LIBXC_nlc_coeff.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_double)]
_itrf.LIBXC_rsh_coeff.argtypes = [ctypes.c_int,ctypes.POINTER(ctypes.c_double)]
_itrf.LIBXC_version.restype = ctypes.c_char_p
_itrf.LIBXC_reference.restype = ctypes.c_char_p
_itrf.LIBXC_reference_doi.restype = ctypes.c_char_p
_itrf.LIBXC_xc_reference.argtypes = [ctypes.c_int, (ctypes.c_char_p * 8)]
def libxc_version():
'''Returns the version of libxc'''
return _itrf.LIBXC_version().decode("UTF-8")
def libxc_reference():
'''Returns the reference to libxc'''
return _itrf.LIBXC_reference().decode("UTF-8")
def libxc_reference_doi():
    '''Returns the DOI of the reference to libxc'''
return _itrf.LIBXC_reference_doi().decode("UTF-8")
__version__ = libxc_version()
__reference__ = libxc_reference()
__reference_doi__ = libxc_reference_doi()
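# Example (illustration only): the helpers above report the provenance of the
# linked libxc build, e.g.
#   >>> libxc_version()        # version string, e.g. '5.1.5'
#   >>> libxc_reference()      # bibliographic reference for libxc
#   >>> libxc_reference_doi()  # the corresponding DOI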
# Runtime detection of available functionals
dynamic_func = getattr(__config__, 'dft_libxc_dynamic', False)
if dynamic_func:
def available_libxc_functionals():
        # Number of available functionals
nfunc = _itrf.LIBXC_number_of_functionals()
# Get functional numbers
numbers = numpy.zeros(nfunc, dtype=numpy.intc)
_itrf.LIBXC_functional_numbers(numbers)
        # Return a dict mapping functional names to libxc numbers
return {_itrf.LIBXC_functional_name(x).decode("UTF-8").upper() : x for x in numbers}
XC = XC_CODES = available_libxc_functionals()
PROBLEMATIC_XC = dict([])
else:
# XC dict is generated by
#import pylibxc
#for xcname in pylibxc.util.xc_available_functional_names():
# f = pylibxc.LibXCFunctional(xcname, 1)
# f_id = f.get_number()
# ref = f.get_references()
# key = f"'{xcname.upper()}'"
# print(f"{key:<31s}: {f_id:<3d}, # {ref[0]}")
XC = XC_CODES = {
'LDA_C_1D_CSC' : 18 , # M. Casula, S. Sorella, and G. Senatore, Phys. Rev. B 74, 245427 (2006)
'LDA_C_1D_LOOS' : 26 , # P.-F. Loos, J. Chem. Phys. 138, 064108 (2013)
'LDA_C_2D_AMGB' : 15 , # C. Attaccalite, S. Moroni, P. Gori-Giorgi, and G. B. Bachelet, Phys. Rev. Lett. 88, 256601 (2002)
'LDA_C_2D_PRM' : 16 , # S. Pittalis, E. Rasanen, and M. A. L. Marques, Phys. Rev. B 78, 195322 (2008)
'LDA_C_BR78' : 552, # G. B. Jr. and S. M. Rothstein, J. Chem. Phys. 69, 1177 (1978)
'LDA_C_CHACHIYO' : 287, # T. Chachiyo, J. Chem. Phys. 145, 021101 (2016)
'LDA_C_CHACHIYO_MOD' : 307, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'LDA_C_GK72' : 578, # R. G. Gordon and Y. S. Kim, J. Chem. Phys. 56, 3122 (1972)
'LDA_C_GL' : 5 , # O. Gunnarsson and B. I. Lundqvist, Phys. Rev. B 13, 4274 (1976)
'LDA_C_GOMBAS' : 24 , # P. Gombas, Fortschr. Phys. 13, 137 (1965)
'LDA_C_HL' : 4 , # L. Hedin and B. I. Lundqvist, J. Phys. C: Solid State Phys. 4, 2064 (1971)
'LDA_C_KARASIEV' : 579, # V. V. Karasiev, J. Chem. Phys. 145, 157101 (2016)
'LDA_C_KARASIEV_MOD' : 308, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'LDA_C_LP96' : 289, # S. Liu and R. G. Parr, Phys. Rev. A 53, 2211 (1996)
'LDA_C_MCWEENY' : 551, # R. McWeeny, in The New World of Quantum Chemistry, edited by B. Pullman and R. Parr (Reidel, Boston, 1976) pp. 3--31
'LDA_C_ML1' : 22 , # E. I. Proynov and D. R. Salahub, Phys. Rev. B 49, 7874 (1994)
'LDA_C_ML2' : 23 , # E. I. Proynov and D. R. Salahub, Phys. Rev. B 49, 7874 (1994)
'LDA_C_OB_PW' : 14 , # G. Ortiz and P. Ballone, Phys. Rev. B 50, 1391 (1994)
'LDA_C_OB_PZ' : 11 , # G. Ortiz and P. Ballone, Phys. Rev. B 50, 1391 (1994)
'LDA_C_OW' : 574, # P. A. Stewart and P. M. W. Gill, J. Chem. Soc., Faraday Trans. 91, 4337 (1995)
'LDA_C_OW_LYP' : 573, # P. A. Stewart and P. M. W. Gill, J. Chem. Soc., Faraday Trans. 91, 4337 (1995)
'LDA_C_PK09' : 554, # E. Proynov and J. Kong, Phys. Rev. A 79, 014103 (2009)
'LDA_C_PMGB06' : 590, # S. Paziani, S. Moroni, P. Gori-Giorgi, and G. B. Bachelet, Phys. Rev. B 73, 155111 (2006)
'LDA_C_PW' : 12 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992)
'LDA_C_PW_MOD' : 13 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992), added extra digits to some constants as in the PBE routine (http://dft.rutgers.edu/pubs/PBE.asc)
'LDA_C_PW_RPA' : 25 , # J. P. Perdew and Y. Wang, Phys. Rev. B 45, 13244 (1992)
'LDA_C_PZ' : 9 , # J. P. Perdew and A. Zunger, Phys. Rev. B 23, 5048 (1981)
'LDA_C_PZ_MOD' : 10 , # J. P. Perdew and A. Zunger, Phys. Rev. B 23, 5048 (1981), modified to improve the matching between the low- and high-rs parts
'LDA_C_RC04' : 27 , # S. Ragot and P. Cortona, J. Chem. Phys. 121, 7671 (2004)
'LDA_C_RPA' : 3 , # M. Gell-Mann and K. A. Brueckner, Phys. Rev. 106, 364 (1957)
'LDA_C_RPW92' : 684, # M. Ruggeri, P. L. Rios, and A. Alavi, Phys. Rev. B 98, 161105 (2018)
'LDA_C_UPW92' : 683, # M. Ruggeri, P. L. Rios, and A. Alavi, Phys. Rev. B 98, 161105 (2018)
'LDA_C_VBH' : 17 , # U. von Barth and L. Hedin, J. Phys. C: Solid State Phys. 5, 1629 (1972)
'LDA_C_VWN' : 7 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_1' : 28 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_2' : 29 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_3' : 30 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_4' : 31 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_VWN_RPA' : 8 , # S. H. Vosko, L. Wilk, and M. Nusair, Can. J. Phys. 58, 1200 (1980)
'LDA_C_WIGNER' : 2 , # E. Wigner, Trans. Faraday Soc. 34, 678 (1938)
'LDA_C_XALPHA' : 6 , # J. C. Slater, Phys. Rev. 81, 385 (1951)
'LDA_K_GDS08_WORKER' : 100001, # L. M. Ghiringhelli and L. Delle Site, Phys. Rev. B 77, 073104 (2008)
'LDA_K_LP' : 51 , # C. Lee and R. G. Parr, Phys. Rev. A 35, 2377 (1987)
'LDA_K_LP96' : 580, # S. Liu and R. G. Parr, Phys. Rev. A 53, 2211 (1996)
'LDA_K_TF' : 50 , # L. H. Thomas, Math. Proc. Cambridge Philos. Soc. 23, 542 (1927)
'LDA_K_ZLP' : 550, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'LDA_X' : 1 , # P. A. M. Dirac, Math. Proc. Cambridge Philos. Soc. 26, 376 (1930)
'LDA_X_1D_EXPONENTIAL' : 600, # N. Helbig, J. I. Fuks, M. Casula, M. J. Verstraete, M. A. L. Marques, I. V. Tokatly, and A. Rubio, Phys. Rev. A 83, 032503 (2011)
'LDA_X_1D_SOFT' : 21 , # N. Helbig, J. I. Fuks, M. Casula, M. J. Verstraete, M. A. L. Marques, I. V. Tokatly, and A. Rubio, Phys. Rev. A 83, 032503 (2011)
'LDA_X_2D' : 19 , # P. A. M. Dirac, Math. Proc. Cambridge Philos. Soc. 26, 376 (1930)
'LDA_X_ERF' : 546, # P. M. W. Gill, R. D. Adamson, and J. A. Pople, Mol. Phys. 88, 1005 (1996)
'LDA_X_RAE' : 549, # A. Rae, Chem. Phys. Lett. 18, 574 (1973)
'LDA_X_REL' : 532, # A. K. Rajagopal, J. Phys. C: Solid State Phys. 11, L943 (1978)
'LDA_X_SLOC' : 692, # K. Finzel and A. I. Baranov, Int. J. Quantum Chem. 117, 40 (2017)
'LDA_XC_1D_EHWLRG_1' : 536, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_1D_EHWLRG_2' : 537, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_1D_EHWLRG_3' : 538, # M. T. Entwistle, M. J. P. Hodgson, J. Wetherell, B. Longstaff, J. D. Ramsden, and R. W. Godby, Phys. Rev. B 94, 205134 (2016)
'LDA_XC_BN05' : 588, # R. Baer and D. Neuhauser, Phys. Rev. Lett. 94, 043002 (2005)
'LDA_XC_GDSMFB' : 577, # S. Groth, T. Dornheim, T. Sjostrom, F. D. Malone, W. M. C. Foulkes, and M. Bonitz, Phys. Rev. Lett. 119, 135001 (2017)
'LDA_XC_KSDT' : 259, # V. V. Karasiev, T. Sjostrom, J. Dufty, and S. B. Trickey, Phys. Rev. Lett. 112, 076403 (2014)
'LDA_XC_LP_A' : 547, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'LDA_XC_LP_B' : 548, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'LDA_XC_TETER93' : 20 , # S. Goedecker, M. Teter, and J. Hutter, Phys. Rev. B 54, 1703 (1996)
'LDA_XC_TIH' : 599, # D. J. Tozer, V. E. Ingamells, and N. C. Handy, J. Chem. Phys. 105, 9200 (1996)
'LDA_XC_ZLP' : 43 , # Q. Zhao, M. Levy, and R. G. Parr, Phys. Rev. A 47, 918 (1993)
'HYB_LDA_XC_CAM_LDA0' : 178, # M. A. Mosquera, C. H. Borca, M. A. Ratner, and G. C. Schatz, J. Phys. Chem. A 120, 1605 (2016)
'HYB_LDA_XC_LDA0' : 177, # P. Rinke, A. Schleife, E. Kioupakis, A. Janotti, C. Rodl, F. Bechstedt, M. Scheffler, and C. G. Van de Walle, Phys. Rev. Lett. 108, 126404 (2012)
'GGA_C_ACGGA' : 39 , # A. Cancio, G. P. Chen, B. T. Krull, and K. Burke, J. Chem. Phys. 149, 084116 (2018)
'GGA_C_ACGGAP' : 176, # A. Cancio, G. P. Chen, B. T. Krull, and K. Burke, J. Chem. Phys. 149, 084116 (2018)
'GGA_C_AM05' : 135, # R. Armiento and A. E. Mattsson, Phys. Rev. B 72, 085108 (2005)
'GGA_C_APBE' : 186, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_C_BMK' : 280, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'GGA_C_CCDF' : 313, # J. T. Margraf, C. Kunkel, and K. Reuter, J. Chem. Phys. 150, 244116 (2019)
'GGA_C_CHACHIYO' : 309, # T. Chachiyo and H. Chachiyo, Comput. Theor. Chem. 1172, 112669 (2020)
'GGA_C_CS1' : 565, # N. C. Handy and A. J. Cohen, J. Chem. Phys. 116, 5411 (2002)
'GGA_C_FT97' : 88 , # M. Filatov and W. Thiel, Int. J. Quantum Chem. 62, 603 (1997)
'GGA_C_GAM' : 33 , # H. S. Yu, W. Zhang, P. Verma, X. He, and D. G. Truhlar, Phys. Chem. Chem. Phys. 17, 12146 (2015)
'GGA_C_GAPC' : 555, # E. Fabiano, P. E. Trevisanutto, A. Terentjevs, and L. A. Constantin, J. Chem. Theory Comput. 10, 2016 (2014)
'GGA_C_GAPLOC' : 556, # E. Fabiano, P. E. Trevisanutto, A. Terentjevs, and L. A. Constantin, J. Chem. Theory Comput. 10, 2016 (2014)
'GGA_C_HCTH_A' : 97 , # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_C_HYB_TAU_HCTH' : 283, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'GGA_C_LM' : 137, # D. C. Langreth and M. J. Mehl, Phys. Rev. Lett. 47, 446 (1981)
'GGA_C_LYP' : 131, # C. Lee, W. Yang, and R. G. Parr, Phys. Rev. B 37, 785 (1988)
'GGA_C_MGGAC' : 712, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 155140 (2019)
'GGA_C_N12' : 80 , # R. Peverati and D. G. Truhlar, J. Chem. Theory Comput. 8, 2310 (2012)
'GGA_C_N12_SX' : 79 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'GGA_C_OP_B88' : 87 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_G96' : 85 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_PBE' : 86 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_PW91' : 262, # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OP_XALPHA' : 84 , # T. Tsuneda, T. Suzumura, and K. Hirao, J. Chem. Phys. 110, 10664 (1999)
'GGA_C_OPTC' : 200, # A. J. Cohen and N. C. Handy, Mol. Phys. 99, 607 (2001)
'GGA_C_P86' : 132, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86_FT' : 217, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86VWN' : 252, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_P86VWN_FT' : 253, # J. P. Perdew, Phys. Rev. B 33, 8822 (1986)
'GGA_C_PBE' : 130, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_C_PBE_JRGX' : 138, # L. S. Pedroza, A. J. R. da Silva, and K. Capelle, Phys. Rev. B 79, 201106 (2009)
'GGA_C_PBE_MOL' : 272, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'GGA_C_PBE_SOL' : 133, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, O. A. Vydrov, G. E. Scuseria, L. A. Constantin, X. Zhou, and K. Burke, Phys. Rev. Lett. 100, 136406 (2008)
'GGA_C_PBE_VWN' : 216, # E. Kraisler, G. Makov, and I. Kelson, Phys. Rev. A 82, 042516 (2010)
'GGA_C_PBEFE' : 258, # R. Sarmiento-Perez, S. Botti, and M. A. L. Marques, J. Chem. Theory Comput. 11, 3844 (2015)
'GGA_C_PBEINT' : 62 , # E. Fabiano, L. A. Constantin, and F. Della Sala, Phys. Rev. B 82, 113104 (2010)
'GGA_C_PBELOC' : 246, # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 86, 035130 (2012)
'GGA_C_PW91' : 134, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_C_Q2D' : 47 , # L. Chiodo, L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. Lett. 108, 126402 (2012)
'GGA_C_REGTPSS' : 83 , # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'GGA_C_REVTCA' : 99 , # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 460, 536 (2008)
'GGA_C_RGE2' : 143, # A. Ruzsinszky, G. I. Csonka, and G. E. Scuseria, J. Chem. Theory Comput. 5, 763 (2009)
'GGA_C_SCAN_E0' : 553, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'GGA_C_SG4' : 534, # L. A. Constantin, A. Terentjevs, F. Della Sala, P. Cortona, and E. Fabiano, Phys. Rev. B 93, 045126 (2016)
'GGA_C_SOGGA11' : 152, # R. Peverati, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. Lett. 2, 1991 (2011)
'GGA_C_SOGGA11_X' : 159, # R. Peverati and D. G. Truhlar, J. Chem. Phys. 135, 191102 (2011)
'GGA_C_SPBE' : 89 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_C_TAU_HCTH' : 281, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'GGA_C_TCA' : 100, # V. Tognetti, P. Cortona, and C. Adamo, J. Chem. Phys. 128, 034101 (2008)
'GGA_C_TM_LYP' : 559, # A. J. Thakkar and S. P. McCarthy, J. Chem. Phys. 131, 134109 (2009)
'GGA_C_TM_PBE' : 560, # A. J. Thakkar and S. P. McCarthy, J. Chem. Phys. 131, 134109 (2009)
'GGA_C_W94' : 561, # L. C. Wilson, Chem. Phys. 181, 337 (1994)
'GGA_C_WI' : 148, # L. C. Wilson and S. Ivanov, Int. J. Quantum Chem. 69, 523 (1998)
'GGA_C_WI0' : 153, # L. C. Wilson and S. Ivanov, Int. J. Quantum Chem. 69, 523 (1998)
'GGA_C_WL' : 147, # L. C. Wilson and M. Levy, Phys. Rev. B 41, 12930 (1990)
'GGA_C_XPBE' : 136, # X. Xu and W. A. Goddard, J. Chem. Phys. 121, 4068 (2004)
'GGA_C_ZPBEINT' : 61 , # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 84, 233103 (2011)
'GGA_C_ZPBESOL' : 63 , # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 84, 233103 (2011)
'GGA_C_ZVPBEINT' : 557, # L. A. Constantin, E. Fabiano, and F. D. Sala, J. Chem. Phys. 137, 194105 (2012)
'GGA_C_ZVPBELOC' : 606, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'GGA_C_ZVPBESOL' : 558, # L. A. Constantin, E. Fabiano, and F. D. Sala, J. Chem. Phys. 137, 194105 (2012)
'GGA_K_ABSP1' : 506, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP2' : 507, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP3' : 277, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_ABSP4' : 278, # P. K. Acharya, L. J. Bartolotti, S. B. Sears, and R. G. Parr, Proc. Natl. Acad. Sci. U. S. A. 77, 6978 (1980)
'GGA_K_APBE' : 185, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_K_APBEINT' : 54 , # S. Laricchia, E. Fabiano, L. A. Constantin, and F. Della Sala, J. Chem. Theory Comput. 7, 2439 (2011)
'GGA_K_BALTIN' : 504, # R. Baltin, Z. Naturforsch. A 27, 1176 (1972)
'GGA_K_DK' : 516, # A. E. DePristo and J. D. Kress, Phys. Rev. A 35, 438 (1987)
'GGA_K_ERNZERHOF' : 520, # M. Ernzerhof, J. Mol. Struct.: THEOCHEM 501--502, 59 (2000)
'GGA_K_EXP4' : 597, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_FR_B88' : 514, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_K_FR_PW86' : 515, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_K_GDS08' : 591, # L. M. Ghiringhelli and L. Delle Site, Phys. Rev. B 77, 073104 (2008)
'GGA_K_GE2' : 501, # A. S. Kompaneets and E. S. Pavlovskii, Zh. Eksp. Teor. Fiz. 31, 427 (1956), [J. Exp. Theor. Phys. 4, 328 (1957)]
'GGA_K_GHDS10' : 592, # L. M. Ghiringhelli, I. P. Hamilton, and L. D. Site, J. Chem. Phys. 132, 014106 (2010)
'GGA_K_GHDS10R' : 593, # S. B. Trickey, V. V. Karasiev, and A. Vela, Phys. Rev. B 84, 075146 (2011)
'GGA_K_GOLDEN' : 502, # S. Golden, Phys. Rev. 105, 604 (1957)
'GGA_K_GP85' : 510, # S. K. Ghosh and R. G. Parr, J. Chem. Phys. 82, 3307 (1985)
'GGA_K_GR' : 508, # J. L. Gazquez and J. Robles, J. Chem. Phys. 76, 1467 (1982)
'GGA_K_LC94' : 521, # A. Lembarki and H. Chermette, Phys. Rev. A 50, 5328 (1994)
'GGA_K_LGAP' : 620, # L. A. Constantin, E. Fabiano, S. Smiga, and F. Della Sala, Phys. Rev. B 95, 115153 (2017)
'GGA_K_LGAP_GE' : 633, # L. A. Constantin, E. Fabiano, S. Smiga, and F. Della Sala, Phys. Rev. B 95, 115153 (2017)
'GGA_K_LIEB' : 505, # E. H. Lieb, Rev. Mod. Phys. 53, 603 (1981)
'GGA_K_LKT' : 613, # K. Luo, V. V. Karasiev, and S. B. Trickey, Phys. Rev. B 98, 041111 (2018)
'GGA_K_LLP' : 522, # H. Lee, C. Lee, and R. G. Parr, Phys. Rev. A 44, 768 (1991)
'GGA_K_LUDENA' : 509, # E. V. Ludena, in Cond. Matt. Theor., Vol. 1, edited by F. B. Malik (Plenum, New York, 1986) p. 183
'GGA_K_MEYER' : 57 , # A. Meyer, G. C. Wang, and W. H. Young, Z. Naturforsch. A 31, 898 (1976)
'GGA_K_OL1' : 512, # H. Ou-Yang and M. Levy, Int. J. Quantum Chem. 40, 379 (1991)
'GGA_K_OL2' : 513, # H. Ou-Yang and M. Levy, Int. J. Quantum Chem. 40, 379 (1991)
'GGA_K_PBE2' : 616, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PBE3' : 595, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PBE4' : 596, # V. V. Karasiev, S. B. Trickey, and F. E. Harris, J. Comput.-Aided Mater. Des. 13, 111 (2006)
'GGA_K_PEARSON' : 511, # D. J. Lacks and R. G. Gordon, J. Chem. Phys. 100, 4446 (1994)
'GGA_K_PERDEW' : 517, # J. P. Perdew, Phys. Lett. A 165, 79 (1992)
'GGA_K_PG1' : 219, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Phys. Chem. Lett. 9, 4385 (2018), pMID: 30019904
'GGA_K_RATIONAL_P' : 218, # J. Lehtomaki and O. Lopez-Acevedo, Phys. Rev. B 100, 165111 (2019)
'GGA_K_REVAPBE' : 55 , # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_K_REVAPBEINT' : 53 , # S. Laricchia, E. Fabiano, L. A. Constantin, and F. Della Sala, J. Chem. Theory Comput. 7, 2439 (2011)
'GGA_K_TFVW' : 52 , # C. F. von Weizsacker, Z. Phys. 96, 431 (1935)
'GGA_K_TFVW_OPT' : 635, # L. A. Espinosa Leal, A. Karpenko, M. A. Caro, and O. Lopez-Acevedo, Phys. Chem. Chem. Phys. 17, 31463 (2015)
'GGA_K_THAKKAR' : 523, # A. J. Thakkar, Phys. Rev. A 46, 6920 (1992)
'GGA_K_TKVLN' : 594, # S. B. Trickey, V. V. Karasiev, and A. Vela, Phys. Rev. B 84, 075146 (2011)
'GGA_K_TW1' : 187, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW2' : 188, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW3' : 189, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_TW4' : 190, # F. Tran and T. A. Wesolowski, Int. J. Quantum Chem. 89, 441 (2002)
'GGA_K_VJKS' : 519, # L. Vitos, B. Johansson, J. Kollar, and H. L. Skriver, Phys. Rev. A 61, 052511 (2000)
'GGA_K_VSK' : 518, # L. Vitos, H. L. Skriver, and J. Kollar, Phys. Rev. B 57, 12611 (1998)
'GGA_K_VT84F' : 619, # V. V. Karasiev, D. Chakraborty, O. A. Shukruto, and S. B. Trickey, Phys. Rev. B 88, 161108 (2013)
'GGA_K_VW' : 500, # C. F. von Weizsacker, Z. Phys. 96, 431 (1935)
'GGA_K_YT65' : 503, # K. Yonei and Y. Tomishima, J. Phys. Soc. Jpn. 20, 1051 (1965)
'GGA_X_2D_B86' : 128, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_2D_B86_MGC' : 124, # S. Pittalis, E. Rasanen, J. G. Vilhena, and M. A. L. Marques, Phys. Rev. A 79, 012503 (2009)
'GGA_X_2D_B88' : 127, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_2D_PBE' : 129, # J. G. Vilhena, E. Rasanen, M. A. L. Marques, and S. Pittalis, J. Chem. Theory Comput. 10, 1837 (2014)
'GGA_X_AIRY' : 192, # L. A. Constantin, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. B 80, 035125 (2009)
'GGA_X_AK13' : 56 , # R. Armiento and S. Kummel, Phys. Rev. Lett. 111, 036402 (2013)
'GGA_X_AM05' : 120, # R. Armiento and A. E. Mattsson, Phys. Rev. B 72, 085108 (2005)
'GGA_X_APBE' : 184, # L. A. Constantin, E. Fabiano, S. Laricchia, and F. Della Sala, Phys. Rev. Lett. 106, 186406 (2011)
'GGA_X_B86' : 103, # A. D. Becke, J. Chem. Phys. 84, 4524 (1986)
'GGA_X_B86_MGC' : 105, # A. D. Becke, J. Chem. Phys. 84, 4524 (1986)
'GGA_X_B86_R' : 41 , # I. Hamada, Phys. Rev. B 89, 121103 (2014)
'GGA_X_B88' : 106, # A. D. Becke, Phys. Rev. A 38, 3098 (1988)
'GGA_X_B88_6311G' : 179, # J. M. Ugalde, C. Sarasola, and M. Aguado, J. Phys. B: At., Mol. Opt. Phys. 27, 423 (1994)
'GGA_X_B88M' : 570, # E. Proynov, H. Chermette, and D. R. Salahub, J. Chem. Phys. 113, 10013 (2000)
'GGA_X_BAYESIAN' : 125, # J. J. Mortensen, K. Kaasbjerg, S. L. Frederiksen, J. K. Norskov, J. P. Sethna, and K. W. Jacobsen, Phys. Rev. Lett. 95, 216401 (2005)
'GGA_X_BCGP' : 38 , # K. Burke, A. Cancio, T. Gould, and S. Pittalis, ArXiv e-prints (2014), arXiv:1409.4834 [cond-mat.mtrl-sci]
'GGA_X_BEEFVDW' : 285, # J. Wellendorff, K. T. Lundgaard, A. Mogelhoj, V. Petzold, D. D. Landis, J. K. Norskov, T. Bligaard, and K. W. Jacobsen, Phys. Rev. B 85, 235149 (2012)
'GGA_X_BPCCAC' : 98 , # E. Bremond, D. Pilard, I. Ciofini, H. Chermette, C. Adamo, and P. Cortona, Theor. Chem. Acc. 131, 1184 (2012)
'GGA_X_C09X' : 158, # V. R. Cooper, Phys. Rev. B 81, 161104 (2010)
'GGA_X_CAP' : 270, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Phys. 142, 054105 (2015)
'GGA_X_CHACHIYO' : 298, # T. Chachiyo and H. Chachiyo, Molecules 25, 3485 (2020)
'GGA_X_DK87_R1' : 111, # A. E. DePristo and J. D. Kress, J. Chem. Phys. 86, 1425 (1987)
'GGA_X_DK87_R2' : 112, # A. E. DePristo and J. D. Kress, J. Chem. Phys. 86, 1425 (1987)
'GGA_X_EB88' : 271, # P. Elliott and K. Burke, Can. J. Chem. 87, 1485 (2009)
'GGA_X_ECMV92' : 215, # E. Engel, J. A. Chevary, L. D. Macdonald, and S. H. Vosko, Z. Phys. D: At., Mol. Clusters 23, 7 (1992)
'GGA_X_EV93' : 35 , # E. Engel and S. H. Vosko, Phys. Rev. B 47, 13164 (1993)
'GGA_X_FD_LB94' : 604, # A. P. Gaiduk and V. N. Staroverov, Phys. Rev. A 83, 012509 (2011)
'GGA_X_FD_REVLB94' : 605, # A. P. Gaiduk and V. N. Staroverov, Phys. Rev. A 83, 012509 (2011)
'GGA_X_FT97_A' : 114, # M. Filatov and W. Thiel, Mol. Phys. 91, 847 (1997)
'GGA_X_FT97_B' : 115, # M. Filatov and W. Thiel, Mol. Phys. 91, 847 (1997)
'GGA_X_G96' : 107, # P. M. W. Gill, Mol. Phys. 89, 433 (1996)
'GGA_X_GAM' : 32 , # H. S. Yu, W. Zhang, P. Verma, X. He, and D. G. Truhlar, Phys. Chem. Chem. Phys. 17, 12146 (2015)
'GGA_X_GG99' : 535, # A. T. Gilbert and P. M. Gill, Chem. Phys. Lett. 312, 511 (1999)
'GGA_X_HCTH_A' : 34 , # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_X_HERMAN' : 104, # F. Herman, J. P. V. Dyke, and I. B. Ortenburger, Phys. Rev. Lett. 22, 807 (1969)
'GGA_X_HJS_B88' : 527, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_B88_V2' : 46 , # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'GGA_X_HJS_B97X' : 528, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_PBE' : 525, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HJS_PBE_SOL' : 526, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'GGA_X_HTBS' : 191, # P. Haas, F. Tran, P. Blaha, and K. Schwarz, Phys. Rev. B 83, 205117 (2011)
'GGA_X_ITYH' : 529, # H. Iikura, T. Tsuneda, T. Yanai, and K. Hirao, J. Chem. Phys. 115, 3540 (2001)
'GGA_X_ITYH_OPTX' : 622, # N. C. Handy and A. J. Cohen, Mol. Phys. 99, 403 (2001)
'GGA_X_ITYH_PBE' : 623, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_X_KGG99' : 544, # A. T. Gilbert and P. M. Gill, Chem. Phys. Lett. 312, 511 (1999)
'GGA_X_KT1' : 145, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_X_LAG' : 193, # L. Vitos, B. Johansson, J. Kollar, and H. L. Skriver, Phys. Rev. B 62, 10046 (2000)
'GGA_X_LAMBDA_CH_N' : 44 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LAMBDA_LO_N' : 45 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LAMBDA_OC2_N' : 40 , # M. M. Odashima, K. Capelle, and S. B. Trickey, J. Chem. Theory Comput. 5, 798 (2009)
'GGA_X_LB' : 160, # R. van Leeuwen and E. J. Baerends, Phys. Rev. A 49, 2421 (1994)
'GGA_X_LBM' : 182, # P. R. T. Schipper, O. V. Gritsenko, S. J. A. van Gisbergen, and E. J. Baerends, J. Chem. Phys. 112, 1344 (2000)
'GGA_X_LG93' : 113, # D. J. Lacks and R. G. Gordon, Phys. Rev. A 47, 4681 (1993)
'GGA_X_LSPBE' : 168, # J. C. Pacheco-Kato, J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 651, 268 (2016)
'GGA_X_LSRPBE' : 169, # J. C. Pacheco-Kato, J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 651, 268 (2016)
'GGA_X_LV_RPW86' : 58 , # K. Berland and P. Hyldgaard, Phys. Rev. B 89, 035412 (2014)
'GGA_X_MB88' : 149, # V. Tognetti and C. Adamo, J. Phys. Chem. A 113, 14415 (2009)
'GGA_X_MPBE' : 122, # C. Adamo and V. Barone, J. Chem. Phys. 116, 5933 (2002)
'GGA_X_MPW91' : 119, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'GGA_X_N12' : 82 , # R. Peverati and D. G. Truhlar, J. Chem. Theory Comput. 8, 2310 (2012)
'GGA_X_NCAP' : 180, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Theory Comput. 15, 303 (2019)
'GGA_X_OL2' : 183, # P. Fuentealba and O. Reyes, Chem. Phys. Lett. 232, 31 (1995)
'GGA_X_OPTB86B_VDW' : 171, # J. Klimes, D. R. Bowler, and A. Michaelides, Phys. Rev. B 83, 195131 (2011)
'GGA_X_OPTB88_VDW' : 139, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_OPTPBE_VDW' : 141, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_OPTX' : 110, # N. C. Handy and A. J. Cohen, Mol. Phys. 99, 403 (2001)
'GGA_X_PBE' : 101, # J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)
'GGA_X_PBE_JSJR' : 126, # L. S. Pedroza, A. J. R. da Silva, and K. Capelle, Phys. Rev. B 79, 201106 (2009)
'GGA_X_PBE_MOL' : 49 , # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'GGA_X_PBE_R' : 102, # Y. Zhang and W. Yang, Phys. Rev. Lett. 80, 890 (1998)
'GGA_X_PBE_SOL' : 116, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, O. A. Vydrov, G. E. Scuseria, L. A. Constantin, X. Zhou, and K. Burke, Phys. Rev. Lett. 100, 136406 (2008)
'GGA_X_PBE_TCA' : 59 , # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 460, 536 (2008)
'GGA_X_PBEA' : 121, # G. K. H. Madsen, Phys. Rev. B 75, 195108 (2007)
'GGA_X_PBEFE' : 265, # R. Sarmiento-Perez, S. Botti, and M. A. L. Marques, J. Chem. Theory Comput. 11, 3844 (2015)
'GGA_X_PBEINT' : 60 , # E. Fabiano, L. A. Constantin, and F. Della Sala, Phys. Rev. B 82, 113104 (2010)
'GGA_X_PBEK1_VDW' : 140, # J. Klimes, D. R. Bowler, and A. Michaelides, J. Phys.: Condens. Matter 22, 022201 (2010)
'GGA_X_PBEPOW' : 539, # Eric Bremond, J. Chem. Phys. 145, 244102 (2016)
'GGA_X_PBETRANS' : 291, # Eric Bremond, I. Ciofini, and C. Adamo, Mol. Phys. 114, 1059 (2016)
'GGA_X_PW86' : 108, # J. P. Perdew and W. Yue, Phys. Rev. B 33, 8800 (1986)
'GGA_X_PW91' : 109, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_X_PW91_MOD' : 316, # J. P. Perdew, in Proceedings of the 75. WE-Heraeus-Seminar and 21st Annual International Symposium on Electronic Structure of Solids, edited by P. Ziesche and H. Eschrig (Akademie Verlag, Berlin, 1991) p. 11
'GGA_X_Q2D' : 48 , # L. Chiodo, L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. Lett. 108, 126402 (2012)
'GGA_X_REVSSB_D' : 312, # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Comput. Chem. 32, 1117 (2011)
'GGA_X_RGE2' : 142, # A. Ruzsinszky, G. I. Csonka, and G. E. Scuseria, J. Chem. Theory Comput. 5, 763 (2009)
'GGA_X_RPBE' : 117, # B. Hammer, L. B. Hansen, and J. K. Norskov, Phys. Rev. B 59, 7413 (1999)
'GGA_X_RPW86' : 144, # E. D. Murray, K. Lee, and D. C. Langreth, J. Chem. Theory Comput. 5, 2754 (2009)
'GGA_X_S12G' : 495, # M. Swart, Chem. Phys. Lett. 580, 166 (2013)
'GGA_X_SFAT' : 530, # A. Savin and H.-J. Flad, Int. J. Quantum Chem. 56, 327 (1995)
'GGA_X_SFAT_PBE' : 601, # A. Savin and H.-J. Flad, Int. J. Quantum Chem. 56, 327 (1995)
'GGA_X_SG4' : 533, # L. A. Constantin, A. Terentjevs, F. Della Sala, P. Cortona, and E. Fabiano, Phys. Rev. B 93, 045126 (2016)
'GGA_X_SOGGA' : 150, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 128, 184109 (2008)
'GGA_X_SOGGA11' : 151, # R. Peverati, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. Lett. 2, 1991 (2011)
'GGA_X_SSB' : 91 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_X_SSB_D' : 92 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Chem. Phys. 131, 094103 (2009)
'GGA_X_SSB_SW' : 90 , # M. Swart, M. Sola, and F. M. Bickelhaupt, J. Comput. Methods Sci. Eng. 9, 69 (2009)
'GGA_X_VMT84_GE' : 68 , # A. Vela, J. C. Pacheco-Kato, J. L. Gazquez, J. M. del Campo, and S. B. Trickey, J. Chem. Phys. 136, 144115 (2012)
'GGA_X_VMT84_PBE' : 69 , # A. Vela, J. C. Pacheco-Kato, J. L. Gazquez, J. M. del Campo, and S. B. Trickey, J. Chem. Phys. 136, 144115 (2012)
'GGA_X_VMT_GE' : 70 , # A. Vela, V. Medel, and S. B. Trickey, J. Chem. Phys. 130, 244103 (2009)
'GGA_X_VMT_PBE' : 71 , # A. Vela, V. Medel, and S. B. Trickey, J. Chem. Phys. 130, 244103 (2009)
'GGA_X_WC' : 118, # Z. Wu and R. E. Cohen, Phys. Rev. B 73, 235116 (2006)
'GGA_X_WPBEH' : 524, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'GGA_X_XPBE' : 123, # X. Xu and W. A. Goddard, J. Chem. Phys. 121, 4068 (2004)
'GGA_XC_B97_D' : 170, # S. Grimme, J. Comput. Chem. 27, 1787 (2006)
'GGA_XC_B97_GGA1' : 96 , # A. J. Cohen and N. C. Handy, Chem. Phys. Lett. 316, 160 (2000)
'GGA_XC_BEEFVDW' : 286, # J. Wellendorff, K. T. Lundgaard, A. Mogelhoj, V. Petzold, D. D. Landis, J. K. Norskov, T. Bligaard, and K. W. Jacobsen, Phys. Rev. B 85, 235149 (2012)
'GGA_XC_EDF1' : 165, # R. D. Adamson, P. M. W. Gill, and J. A. Pople, Chem. Phys. Lett. 284, 6 (1998)
'GGA_XC_HCTH_120' : 162, # A. D. Boese, N. L. Doltsinis, N. C. Handy, and M. Sprik, J. Chem. Phys. 112, 1670 (2000)
'GGA_XC_HCTH_147' : 163, # A. D. Boese, N. L. Doltsinis, N. C. Handy, and M. Sprik, J. Chem. Phys. 112, 1670 (2000)
'GGA_XC_HCTH_407' : 164, # A. D. Boese and N. C. Handy, J. Chem. Phys. 114, 5497 (2001)
'GGA_XC_HCTH_407P' : 93 , # A. D. Boese, A. Chandra, J. M. L. Martin, and D. Marx, J. Chem. Phys. 119, 5965 (2003)
'GGA_XC_HCTH_93' : 161, # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'GGA_XC_HCTH_P14' : 95 , # G. Menconi, P. J. Wilson, and D. J. Tozer, J. Chem. Phys. 114, 3958 (2001)
'GGA_XC_HCTH_P76' : 94 , # G. Menconi, P. J. Wilson, and D. J. Tozer, J. Chem. Phys. 114, 3958 (2001)
'GGA_XC_HLE16' : 545, # P. Verma and D. G. Truhlar, J. Phys. Chem. Lett. 8, 380 (2017)
'GGA_XC_KT1' : 167, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_XC_KT2' : 146, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 119, 3015 (2003)
'GGA_XC_KT3' : 587, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 121, 5654 (2004)
'GGA_XC_LB07' : 589, # E. Livshits and R. Baer, Phys. Chem. Chem. Phys. 9, 2932 (2007)
'GGA_XC_MOHLYP' : 194, # N. E. Schultz, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. A 109, 11127 (2005)
'GGA_XC_MOHLYP2' : 195, # J. Zheng, Y. Zhao, and D. G. Truhlar, J. Chem. Theory Comput. 5, 808 (2009)
'GGA_XC_MPWLYP1W' : 174, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_NCAP' : 181, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, J. Chem. Theory Comput. 15, 303 (2019)
'GGA_XC_OBLYP_D' : 67 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_OPBE_D' : 65 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_OPWLYP_D' : 66 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'GGA_XC_PBE1W' : 173, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_PBELYP1W' : 175, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'GGA_XC_TH1' : 154, # D. J. Tozer and N. C. Handy, J. Chem. Phys. 108, 2545 (1998)
'GGA_XC_TH2' : 155, # D. J. Tozer and N. C. Handy, J. Phys. Chem. A 102, 3162 (1998)
'GGA_XC_TH3' : 156, # N. C. Handy and D. J. Tozer, Mol. Phys. 94, 707 (1998)
'GGA_XC_TH4' : 157, # N. C. Handy and D. J. Tozer, Mol. Phys. 94, 707 (1998)
'GGA_XC_TH_FC' : 197, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FCFO' : 198, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FCO' : 199, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_TH_FL' : 196, # D. J. Tozer, N. C. Handy, and W. H. Green, Chem. Phys. Lett. 273, 183 (1997)
'GGA_XC_VV10' : 255, # O. A. Vydrov and T. Van Voorhis, J. Chem. Phys. 133, 244103 (2010)
'GGA_XC_XLYP' : 166, # X. Xu and W. A. Goddard, Proc. Natl. Acad. Sci. U. S. A. 101, 2673 (2004)
#'HYB_GGA_X_LC2GAU' : 710, # J.-W. Song, M. A. Watson, and K. Hirao, J. Chem. Phys. 131, 144108 (2009)
#'HYB_GGA_X_LCGAU' : 708, # J.-W. Song, S. Tokura, T. Sato, M. A. Watson, and K. Hirao, J. Chem. Phys. 127, 154109 (2007)
#'HYB_GGA_X_LCGAU_CORE' : 709, # J.-W. Song, M. A. Watson, A. Nakata, and K. Hirao, J. Chem. Phys. 129, 184113 (2008)
'HYB_GGA_X_N12_SX' : 81 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'HYB_GGA_X_S12H' : 496, # M. Swart, Chem. Phys. Lett. 580, 166 (2013)
'HYB_GGA_X_SOGGA11_X' : 426, # R. Peverati and D. G. Truhlar, J. Chem. Phys. 135, 191102 (2011)
'HYB_GGA_XC_APBE0' : 607, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'HYB_GGA_XC_APF' : 409, # A. Austin, G. A. Petersson, M. J. Frisch, F. J. Dobek, G. Scalmani, and K. Throssell, J. Chem. Theory Comput. 8, 4989 (2012)
'HYB_GGA_XC_B1LYP' : 416, # C. Adamo and V. Barone, Chem. Phys. Lett. 274, 242 (1997)
'HYB_GGA_XC_B1PW91' : 417, # C. Adamo and V. Barone, Chem. Phys. Lett. 274, 242 (1997)
'HYB_GGA_XC_B1WC' : 412, # D. I. Bilc, R. Orlando, R. Shaltaf, G.-M. Rignanese, J. Iniguez, and P. Ghosez, Phys. Rev. B 77, 165107 (2008)
#'HYB_GGA_XC_B2PLYP' : 713, # S. Grimme, J. Chem. Phys. 124, 034108 (2006)
'HYB_GGA_XC_B3LYP' : 402, # P. J. Stephens, F. J. Devlin, C. F. Chabalowski, and M. J. Frisch, J. Phys. Chem. 98, 11623 (1994)
'HYB_GGA_XC_B3LYP5' : 475, # P. J. Stephens, F. J. Devlin, C. F. Chabalowski, and M. J. Frisch, J. Phys. Chem. 98, 11623 (1994)
'HYB_GGA_XC_B3LYP_MCM1' : 461, # M. T. Caldeira and R. Custodio, J. Mol. Model. 25, 62 (2019)
'HYB_GGA_XC_B3LYP_MCM2' : 462, # M. T. Caldeira and R. Custodio, J. Mol. Model. 25, 62 (2019)
'HYB_GGA_XC_B3LYPS' : 459, # M. Reiher, O. Salomon, and B. A. Hess, Theor. Chem. Acc. 107, 48 (2001)
'HYB_GGA_XC_B3P86' : 403, # Defined through Gaussian implementation
#'HYB_GGA_XC_B3P86_NWCHEM' : 315, # Defined through NWChem implementation
'HYB_GGA_XC_B3PW91' : 401, # A. D. Becke, J. Chem. Phys. 98, 5648 (1993)
'HYB_GGA_XC_B5050LYP' : 572, # Y. Shao, M. Head-Gordon, and A. I. Krylov, J. Chem. Phys. 118, 4807 (2003)
'HYB_GGA_XC_B97' : 407, # A. D. Becke, J. Chem. Phys. 107, 8554 (1997)
'HYB_GGA_XC_B97_1' : 408, # F. A. Hamprecht, A. J. Cohen, D. J. Tozer, and N. C. Handy, J. Chem. Phys. 109, 6264 (1998)
'HYB_GGA_XC_B97_1P' : 266, # A. J. Cohen and N. C. Handy, Chem. Phys. Lett. 316, 160 (2000)
'HYB_GGA_XC_B97_2' : 410, # P. J. Wilson, T. J. Bradley, and D. J. Tozer, J. Chem. Phys. 115, 9233 (2001)
'HYB_GGA_XC_B97_3' : 414, # T. W. Keal and D. J. Tozer, J. Chem. Phys. 123, 121103 (2005)
'HYB_GGA_XC_B97_K' : 413, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'HYB_GGA_XC_BHANDH' : 435, # A. D. Becke, J. Chem. Phys. 98, 1372 (1993)
'HYB_GGA_XC_BHANDHLYP' : 436, # A. D. Becke, J. Chem. Phys. 98, 1372 (1993)
'HYB_GGA_XC_BLYP35' : 499, # M. Renz, K. Theilacker, C. Lambert, and M. Kaupp, J. Am. Chem. Soc. 131, 16292 (2009)
'HYB_GGA_XC_CAM_B3LYP' : 433, # T. Yanai, D. P. Tew, and N. C. Handy, Chem. Phys. Lett. 393, 51 (2004)
'HYB_GGA_XC_CAM_O3LYP' : 395, # M. P. Bircher and U. Rothlisberger, J. Chem. Theory Comput. 14, 3184 (2018)
'HYB_GGA_XC_CAM_PBEH' : 681, # W. Chen, G. Miceli, G.-M. Rignanese, and A. Pasquarello, Phys. Rev. Mater. 2, 073803 (2018)
'HYB_GGA_XC_CAM_QTP_00' : 490, # P. Verma and R. J. Bartlett, J. Chem. Phys. 140, 18A534 (2014)
'HYB_GGA_XC_CAM_QTP_01' : 482, # Y. Jin and R. J. Bartlett, J. Chem. Phys. 145, 034107 (2016)
'HYB_GGA_XC_CAM_QTP_02' : 491, # R. L. A. Haiduke and R. J. Bartlett, J. Chem. Phys. 148, 184106 (2018)
'HYB_GGA_XC_CAMH_B3LYP' : 614, # Y. Shao, Y. Mei, D. Sundholm, and V. R. I. Kaila, J. Chem. Theory Comput. 16, 587 (2020), https://doi.org/10.1021/acs.jctc.9b00823
'HYB_GGA_XC_CAMY_B3LYP' : 470, # M. Seth and T. Ziegler, J. Chem. Theory Comput. 8, 901 (2012)
'HYB_GGA_XC_CAMY_BLYP' : 455, # Y. Akinaga and S. Ten-no, Chem. Phys. Lett. 462, 348 (2008)
'HYB_GGA_XC_CAMY_PBEH' : 682, # W. Chen, G. Miceli, G.-M. Rignanese, and A. Pasquarello, Phys. Rev. Mater. 2, 073803 (2018)
'HYB_GGA_XC_CAP0' : 477, # J. Carmona-Espindola, J. L. Gazquez, A. Vela, and S. B. Trickey, Theor. Chem. Acc. 135, 120 (2016)
'HYB_GGA_XC_EDF2' : 476, # C. Y. Lin, M. W. George, and P. M. W. Gill, Aust. J. Chem. 57, 365 (2004)
'HYB_GGA_XC_HAPBE' : 608, # E. Fabiano, L. A. Constantin, P. Cortona, and F. Della Sala, J. Chem. Theory Comput. 11, 122 (2015)
'HYB_GGA_XC_HFLYP' : 314, # C. Lee, W. Yang, and R. G. Parr, Phys. Rev. B 37, 785 (1988)
#'HYB_GGA_XC_HISS' : 717, # T. M. Henderson, A. F. Izmaylov, G. E. Scuseria, and A. Savin, J. Chem. Phys. 127, 221103 (2007)
'HYB_GGA_XC_HJS_B88' : 431, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_B97X' : 432, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_PBE' : 429, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HJS_PBE_SOL' : 430, # T. M. Henderson, B. G. Janesko, and G. E. Scuseria, J. Chem. Phys. 128, 194105 (2008)
'HYB_GGA_XC_HPBEINT' : 472, # E. Fabiano, L. A. Constantin, and F. Della Sala, Int. J. Quantum Chem. 113, 673 (2013)
'HYB_GGA_XC_HSE03' : 427, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'HYB_GGA_XC_HSE06' : 428, # J. Heyd, G. E. Scuseria, and M. Ernzerhof, J. Chem. Phys. 118, 8207 (2003)
'HYB_GGA_XC_HSE12' : 479, # J. E. Moussa, P. A. Schultz, and J. R. Chelikowsky, J. Chem. Phys. 136, 204117 (2012)
'HYB_GGA_XC_HSE12S' : 480, # J. E. Moussa, P. A. Schultz, and J. R. Chelikowsky, J. Chem. Phys. 136, 204117 (2012)
'HYB_GGA_XC_HSE_SOL' : 481, # L. Schimka, J. Harl, and G. Kresse, J. Chem. Phys. 134, 024116 (2011)
'HYB_GGA_XC_KMLYP' : 485, # J. K. Kang and C. B. Musgrave, J. Chem. Phys. 115, 11040 (2001)
'HYB_GGA_XC_LC_BLYP' : 400, # L. N. Anderson, M. B. Oviedo, and B. M. Wong, J. Chem. Theory Comput. 13, 1656 (2017)
'HYB_GGA_XC_LC_BOP' : 636, # J.-W. Song, T. Hirosawa, T. Tsuneda, and K. Hirao, J. Chem. Phys. 126, 154105 (2007)
'HYB_GGA_XC_LC_PBEOP' : 637, # Y. Tawada, T. Tsuneda, S. Yanagisawa, T. Yanai, and K. Hirao, J. Chem. Phys. 120, 8425 (2004)
'HYB_GGA_XC_LC_QTP' : 492, # R. L. A. Haiduke and R. J. Bartlett, J. Chem. Phys. 148, 184106 (2018)
'HYB_GGA_XC_LC_VV10' : 469, # O. A. Vydrov and T. Van Voorhis, J. Chem. Phys. 133, 244103 (2010)
'HYB_GGA_XC_LC_WPBE' : 478, # O. A. Vydrov and G. E. Scuseria, J. Chem. Phys. 125, 234109 (2006)
'HYB_GGA_XC_LC_WPBE08_WHS' : 488, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBE_WHS' : 486, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBEH_WHS' : 487, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LC_WPBESOL_WHS' : 489, # E. Weintraub, T. M. Henderson, and G. E. Scuseria, J. Chem. Theory Comput. 5, 754 (2009)
'HYB_GGA_XC_LCY_BLYP' : 468, # Y. Akinaga and S. Ten-no, Chem. Phys. Lett. 462, 348 (2008)
'HYB_GGA_XC_LCY_PBE' : 467, # M. Seth and T. Ziegler, J. Chem. Theory Comput. 8, 901 (2012)
'HYB_GGA_XC_LRC_WPBE' : 473, # M. A. Rohrdanz, K. M. Martins, and J. M. Herbert, J. Chem. Phys. 130, 054112 (2009)
'HYB_GGA_XC_LRC_WPBEH' : 465, # M. A. Rohrdanz, K. M. Martins, and J. M. Herbert, J. Chem. Phys. 130, 054112 (2009)
'HYB_GGA_XC_MB3LYP_RC04' : 437, # V. Tognetti, P. Cortona, and C. Adamo, Chem. Phys. Lett. 439, 381 (2007)
'HYB_GGA_XC_MPW1K' : 405, # B. J. Lynch, P. L. Fast, M. Harris, and D. G. Truhlar, J. Phys. Chem. A 104, 4811 (2000)
'HYB_GGA_XC_MPW1LYP' : 483, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW1PBE' : 484, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW1PW' : 418, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPW3LYP' : 419, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_GGA_XC_MPW3PW' : 415, # C. Adamo and V. Barone, J. Chem. Phys. 108, 664 (1998)
'HYB_GGA_XC_MPWLYP1M' : 453, # N. E. Schultz, Y. Zhao, and D. G. Truhlar, J. Phys. Chem. A 109, 11127 (2005)
'HYB_GGA_XC_O3LYP' : 404, # A. J. Cohen and N. C. Handy, Mol. Phys. 99, 607 (2001)
'HYB_GGA_XC_PBE0_13' : 456, # P. Cortona, J. Chem. Phys. 136, 086101 (2012)
'HYB_GGA_XC_PBE50' : 290, # Y. A. Bernard, Y. Shao, and A. I. Krylov, J. Chem. Phys. 136, 204103 (2012)
'HYB_GGA_XC_PBE_MOL0' : 273, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBE_MOLB0' : 276, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBE_SOL0' : 274, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBEB0' : 275, # J. M. del Campo, J. L. Gazquez, S. B. Trickey, and A. Vela, J. Chem. Phys. 136, 104108 (2012)
'HYB_GGA_XC_PBEH' : 406, # C. Adamo and V. Barone, J. Chem. Phys. 110, 6158 (1999)
'HYB_GGA_XC_QTP17' : 460, # Y. Jin and R. J. Bartlett, J. Chem. Phys. 149, 064111 (2018)
'HYB_GGA_XC_RCAM_B3LYP' : 610, # A. J. Cohen, P. Mori-Sanchez, and W. Yang, J. Chem. Phys. 126, 191109 (2007)
'HYB_GGA_XC_REVB3LYP' : 454, # L. Lu, H. Hu, H. Hou, and B. Wang, Comput. Theor. Chem. 1015, 64 (2013)
'HYB_GGA_XC_SB98_1A' : 420, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_1B' : 421, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_1C' : 422, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2A' : 423, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2B' : 424, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
'HYB_GGA_XC_SB98_2C' : 425, # H. L. Schmider and A. D. Becke, J. Chem. Phys. 108, 9624 (1998)
#'HYB_GGA_XC_SRC1_BLYP' : 714, # N. A. Besley, M. J. G. Peach, and D. J. Tozer, Phys. Chem. Chem. Phys. 11, 10350 (2009)
#'HYB_GGA_XC_SRC2_BLYP' : 715, # N. A. Besley, M. J. G. Peach, and D. J. Tozer, Phys. Chem. Chem. Phys. 11, 10350 (2009)
'HYB_GGA_XC_TUNED_CAM_B3LYP' : 434, # K. Okuno, Y. Shigeta, R. Kishi, H. Miyasaka, and M. Nakano, J. Photochem. Photobiol., A 235, 29 (2012)
'HYB_GGA_XC_WB97' : 463, # J.-D. Chai and M. Head-Gordon, J. Chem. Phys. 128, 084106 (2008)
'HYB_GGA_XC_WB97X' : 464, # J.-D. Chai and M. Head-Gordon, J. Chem. Phys. 128, 084106 (2008)
'HYB_GGA_XC_WB97X_D' : 471, # J.-D. Chai and M. Head-Gordon, Phys. Chem. Chem. Phys. 10, 6615 (2008)
'HYB_GGA_XC_WB97X_D3' : 399, # Y.-S. Lin, G.-D. Li, S.-P. Mao, and J.-D. Chai, J. Chem. Theory Comput. 9, 263 (2013)
'HYB_GGA_XC_WB97X_V' : 466, # N. Mardirossian and M. Head-Gordon, Phys. Chem. Chem. Phys. 16, 9904 (2014)
'HYB_GGA_XC_WC04' : 611, # K. W. Wiitala, T. R. Hoye, and C. J. Cramer, J. Chem. Theory Comput. 2, 1085 (2006)
'HYB_GGA_XC_WHPBE0' : 615, # Y. Shao, Y. Mei, D. Sundholm, and V. R. I. Kaila, J. Chem. Theory Comput. 16, 587 (2020), https://doi.org/10.1021/acs.jctc.9b00823
'HYB_GGA_XC_WP04' : 612, # K. W. Wiitala, T. R. Hoye, and C. J. Cramer, J. Chem. Theory Comput. 2, 1085 (2006)
'HYB_GGA_XC_X3LYP' : 411, # X. Xu and W. A. Goddard, Proc. Natl. Acad. Sci. U. S. A. 101, 2673 (2004)
'MGGA_C_B88' : 571, # A. D. Becke, J. Chem. Phys. 88, 1053 (1988)
'MGGA_C_B94' : 397, # A. D. Becke, Int. J. Quantum Chem. 52, 625 (1994)
'MGGA_C_BC95' : 240, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'MGGA_C_CS' : 72 , # R. Colle and O. Salvetti, Theor. Chim. Acta 37, 329 (1975)
'MGGA_C_DLDF' : 37 , # K. Pernal, R. Podeszwa, K. Patkowski, and K. Szalewicz, Phys. Rev. Lett. 103, 263201 (2009)
    'MGGA_C_HLTAPW' : 699, # S. Lehtola and M. A. L. Marques, Meta-local density functionals: a new rung on Jacob's ladder, (2020), arXiv:2006.16835 [physics.chem-ph]
'MGGA_C_KCIS' : 562, # J. Rey and A. Savin, Int. J. Quantum Chem. 69, 581 (1998)
'MGGA_C_KCISK' : 638, # J. Rey and A. Savin, Int. J. Quantum Chem. 69, 581 (1998)
'MGGA_C_M05' : 237, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Phys. 123, 161103 (2005)
'MGGA_C_M05_2X' : 238, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Theory Comput. 2, 364 (2006)
'MGGA_C_M06' : 235, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'MGGA_C_M06_2X' : 236, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'MGGA_C_M06_HF' : 234, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 110, 13126 (2006)
'MGGA_C_M06_L' : 233, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 125, 194101 (2006)
'MGGA_C_M06_SX' : 311, # Y. Wang, P. Verma, L. Zhang, Y. Li, Z. Liu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 117, 2294 (2020), https://www.pnas.org/content/117/5/2294.full.pdf
'MGGA_C_M08_HX' : 78 , # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'MGGA_C_M08_SO' : 77 , # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'MGGA_C_M11' : 76 , # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 2, 2810 (2011)
'MGGA_C_M11_L' : 75 , # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 3, 117 (2012)
'MGGA_C_MN12_L' : 74 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 13171 (2012)
'MGGA_C_MN12_SX' : 73 , # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'MGGA_C_MN15' : 269, # H. S. Yu, X. He, S. L. Li, and D. G. Truhlar, Chem. Sci. 7, 5032 (2016)
'MGGA_C_MN15_L' : 261, # H. S. Yu, X. He, and D. G. Truhlar, J. Chem. Theory Comput. 12, 1280 (2016)
'MGGA_C_PKZB' : 239, # J. P. Perdew, S. Kurth, A. Zupan, and P. Blaha, Phys. Rev. Lett. 82, 2544 (1999)
'MGGA_C_R2SCAN' : 498, # J. W. Furness, A. D. Kaplan, J. Ning, J. P. Perdew, and J. Sun, J. Phys. Chem. Lett. 11, 8208 (2020)
'MGGA_C_R2SCANL' : 719, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. B 102, 121109 (2020)
'MGGA_C_REVM06' : 306, # Y. Wang, P. Verma, X. Jin, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 115, 10257 (2018)
'MGGA_C_REVM06_L' : 294, # Y. Wang, X. Jin, H. S. Yu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 114, 8487 (2017)
'MGGA_C_REVM11' : 172, # P. Verma, Y. Wang, S. Ghosh, X. He, and D. G. Truhlar, J. Phys. Chem. A 123, 2966 (2019)
'MGGA_C_REVSCAN' : 582, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_C_REVSCAN_VV10' : 585, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_C_REVTM' : 694, # S. Jana, K. Sharma, and P. Samal, J. Phys. Chem. A 123, 6356 (2019)
'MGGA_C_REVTPSS' : 241, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'MGGA_C_RSCAN' : 494, # A. P. Bartok and J. R. Yates, J. Chem. Phys. 150, 161101 (2019)
'MGGA_C_SCAN' : 267, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'MGGA_C_SCAN_RVV10' : 292, # H. Peng, Z.-H. Yang, J. P. Perdew, and J. Sun, Phys. Rev. X 6, 041005 (2016)
'MGGA_C_SCAN_VV10' : 584, # J. G. Brandenburg, J. E. Bates, J. Sun, and J. P. Perdew, Phys. Rev. B 94, 115144 (2016)
'MGGA_C_SCANL' : 702, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_SCANL_RVV10' : 703, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_SCANL_VV10' : 704, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_C_TM' : 251, # J. Tao and Y. Mo, Phys. Rev. Lett. 117, 073001 (2016)
'MGGA_C_TPSS' : 231, # J. Tao, J. P. Perdew, V. N. Staroverov, and G. E. Scuseria, Phys. Rev. Lett. 91, 146401 (2003)
'MGGA_C_TPSSLOC' : 247, # L. A. Constantin, E. Fabiano, and F. Della Sala, Phys. Rev. B 86, 035130 (2012)
'MGGA_C_VSXC' : 232, # T. V. Voorhis and G. E. Scuseria, J. Chem. Phys. 109, 400 (1998)
'MGGA_K_CSK1' : 629, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK4' : 630, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK_LOC1' : 631, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_CSK_LOC4' : 632, # A. C. Cancio, D. Stewart, and A. Kuna, J. Chem. Phys. 144, 084107 (2016)
'MGGA_K_GEA2' : 627, # A. S. Kompaneets and E. S. Pavlovskii, Zh. Eksp. Teor. Fiz. 31, 427 (1956), [J. Exp. Theor. Phys. 4, 328 (1957)]
'MGGA_K_GEA4' : 628, # C. H. Hodges, Can. J. Phys. 51, 1428 (1973)
'MGGA_K_L04' : 617, # S. Laricchia, L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 10, 164 (2014)
'MGGA_K_L06' : 618, # S. Laricchia, L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 10, 164 (2014)
'MGGA_K_PC07' : 543, # J. P. Perdew and L. A. Constantin, Phys. Rev. B 75, 155109 (2007)
'MGGA_K_PC07_OPT' : 634, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_K_PGSL025' : 220, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Phys. Chem. Lett. 9, 4385 (2018), pMID: 30019904
'MGGA_K_RDA' : 621, # V. V. Karasiev, R. S. Jones, S. B. Trickey, and F. E. Harris, Phys. Rev. B 80, 245120 (2009)
'MGGA_X_2D_JS17' : 609, # S. Jana and P. Samal, J. Phys. Chem. A 121, 4804 (2017)
'MGGA_X_2D_PRHG07' : 210, # S. Pittalis, E. Rasanen, N. Helbig, and E. K. U. Gross, Phys. Rev. B 76, 235314 (2007)
'MGGA_X_2D_PRHG07_PRP10' : 211, # S. Pittalis, E. Rasanen, N. Helbig, and E. K. U. Gross, Phys. Rev. B 76, 235314 (2007)
'MGGA_X_B00' : 284, # A. D. Becke, J. Chem. Phys. 112, 4020 (2000)
'MGGA_X_BJ06' : 207, # A. D. Becke and E. R. Johnson, J. Chem. Phys. 124, 221101 (2006)
'MGGA_X_BLOC' : 244, # L. A. Constantin, E. Fabiano, and F. Della Sala, J. Chem. Theory Comput. 9, 2256 (2013)
'MGGA_X_BR89' : 206, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_1' : 214, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_EXPLICIT' : 586, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_BR89_EXPLICIT_1' : 602, # A. D. Becke and M. R. Roussel, Phys. Rev. A 39, 3761 (1989)
'MGGA_X_EDMGGA' : 686, # J. Tao, J. Chem. Phys. 115, 3519 (2001)
'MGGA_X_GDME_0' : 689, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GDME_KOS' : 690, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GDME_NV' : 687, # J. W. Negele and D. Vautherin, Phys. Rev. C 5, 1472 (1972)
'MGGA_X_GDME_VT' : 691, # R. M. Koehl, G. K. Odom, and G. E. Scuseria, Mol. Phys. 87, 835 (1996)
'MGGA_X_GVT4' : 204, # T. V. Voorhis and G. E. Scuseria, J. Chem. Phys. 109, 400 (1998)
'MGGA_X_GX' : 575, # P.-F. Loos, J. Chem. Phys. 146, 114108 (2017)
    'MGGA_X_HLTA' : 698, # S. Lehtola and M. A. L. Marques, Meta-local density functionals: a new rung on Jacob's ladder, (2020), arXiv:2006.16835 [physics.chem-ph]
'MGGA_X_JK' : 256, # P. Jemmer and P. J. Knowles, Phys. Rev. A 51, 3571 (1995)
'MGGA_X_LTA' : 201, # M. Ernzerhof and G. E. Scuseria, J. Chem. Phys. 111, 911 (1999)
'MGGA_X_M06_L' : 203, # Y. Zhao and D. G. Truhlar, J. Chem. Phys. 125, 194101 (2006)
'MGGA_X_M11_L' : 226, # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 3, 117 (2012)
'MGGA_X_MBEEF' : 249, # J. Wellendorff, K. T. Lundgaard, K. W. Jacobsen, and T. Bligaard, J. Chem. Phys. 140, 144107 (2014)
'MGGA_X_MBEEFVDW' : 250, # K. T. Lundgaard, J. Wellendorff, J. Voss, K. W. Jacobsen, and T. Bligaard, Phys. Rev. B 93, 235162 (2016)
'MGGA_X_MBR' : 716, # A. Patra, S. Jana, H. Myneni, and P. Samal, Phys. Chem. Chem. Phys. 21, 19639 (2019)
'MGGA_X_MBRXC_BG' : 696, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 045147 (2019)
'MGGA_X_MBRXH_BG' : 697, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 045147 (2019)
'MGGA_X_MGGAC' : 711, # B. Patra, S. Jana, L. A. Constantin, and P. Samal, Phys. Rev. B 100, 155140 (2019)
'MGGA_X_MK00' : 230, # F. R. Manby and P. J. Knowles, J. Chem. Phys. 112, 7002 (2000)
'MGGA_X_MK00B' : 243, # F. R. Manby and P. J. Knowles, J. Chem. Phys. 112, 7002 (2000)
'MGGA_X_MN12_L' : 227, # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 13171 (2012)
'MGGA_X_MN15_L' : 260, # H. S. Yu, X. He, and D. G. Truhlar, J. Chem. Theory Comput. 12, 1280 (2016)
'MGGA_X_MODTPSS' : 245, # J. P. Perdew, A. Ruzsinszky, J. Tao, G. I. Csonka, and G. E. Scuseria, Phys. Rev. A 76, 042506 (2007)
'MGGA_X_MS0' : 221, # J. Sun, B. Xiao, and A. Ruzsinszky, J. Chem. Phys. 137, 051101 (2012)
'MGGA_X_MS1' : 222, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2' : 223, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2_REV' : 228, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'MGGA_X_MS2B' : 300, # J. W. Furness and J. Sun, Phys. Rev. B 99, 041119 (2019)
'MGGA_X_MS2BS' : 301, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_MVS' : 257, # J. Sun, J. P. Perdew, and A. Ruzsinszky, Proc. Natl. Acad. Sci. U. S. A. 112, 685 (2015)
'MGGA_X_MVSB' : 302, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_MVSBS' : 303, # J. W. Furness and J. Sun, ArXiv e-prints (2018), arXiv:1805.11707v1 [physics.chem-ph]
'MGGA_X_PBE_GX' : 576, # P.-F. Loos, J. Chem. Phys. 146, 114108 (2017)
'MGGA_X_PKZB' : 213, # J. P. Perdew, S. Kurth, A. Zupan, and P. Blaha, Phys. Rev. Lett. 82, 2544 (1999)
'MGGA_X_R2SCAN' : 497, # J. W. Furness, A. D. Kaplan, J. Ning, J. P. Perdew, and J. Sun, J. Phys. Chem. Lett. 11, 8208 (2020)
'MGGA_X_R2SCANL' : 718, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. B 102, 121109 (2020)
'MGGA_X_REGTPSS' : 603, # A. Ruzsinszky, J. Sun, B. Xiao, and G. I. Csonka, J. Chem. Theory Comput. 8, 2078 (2012)
'MGGA_X_REVM06_L' : 293, # Y. Wang, X. Jin, H. S. Yu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 114, 8487 (2017)
'MGGA_X_REVSCAN' : 581, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'MGGA_X_REVSCANL' : 701, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_X_REVTM' : 693, # S. Jana, K. Sharma, and P. Samal, J. Phys. Chem. A 123, 6356 (2019)
'MGGA_X_REVTPSS' : 212, # J. P. Perdew, A. Ruzsinszky, G. I. Csonka, L. A. Constantin, and J. Sun, Phys. Rev. Lett. 103, 026403 (2009)
'MGGA_X_RLDA' : 688, # X. Campi and A. Bouyssy, Phys. Lett. B 73, 263 (1978)
'MGGA_X_RPP09' : 209, # E. Rasanen, S. Pittalis, and C. R. Proetto, J. Chem. Phys. 132, 044112 (2010)
'MGGA_X_RSCAN' : 493, # A. P. Bartok and J. R. Yates, J. Chem. Phys. 150, 161101 (2019)
'MGGA_X_RTPSS' : 299, # A. J. Garza, A. T. Bell, and M. Head-Gordon, J. Chem. Theory Comput. 14, 3083 (2018)
'MGGA_X_SA_TPSS' : 542, # L. A. Constantin, E. Fabiano, J. M. Pitarke, and F. Della Sala, Phys. Rev. B 93, 115127 (2016)
'MGGA_X_SCAN' : 263, # J. Sun, A. Ruzsinszky, and J. P. Perdew, Phys. Rev. Lett. 115, 036402 (2015)
'MGGA_X_SCANL' : 700, # D. Mejia-Rodriguez and S. B. Trickey, Phys. Rev. A 96, 052512 (2017)
'MGGA_X_TASK' : 707, # T. Aschebrock and S. Kummel, Phys. Rev. Res. 1, 033082 (2019)
'MGGA_X_TAU_HCTH' : 205, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'MGGA_X_TB09' : 208, # F. Tran and P. Blaha, Phys. Rev. Lett. 102, 226401 (2009)
'MGGA_X_TH' : 225, # T. Tsuneda and K. Hirao, Phys. Rev. B 62, 15527 (2000)
'MGGA_X_TLDA' : 685, # F. G. Eich and M. Hellgren, J. Chem. Phys. 141, 224107 (2014)
'MGGA_X_TM' : 540, # J. Tao and Y. Mo, Phys. Rev. Lett. 117, 073001 (2016)
'MGGA_X_TPSS' : 202, # J. Tao, J. P. Perdew, V. N. Staroverov, and G. E. Scuseria, Phys. Rev. Lett. 91, 146401 (2003)
'MGGA_X_VT84' : 541, # J. M. del Campo, J. L. Gazquez, S. Trickey, and A. Vela, Chem. Phys. Lett. 543, 179 (2012)
'MGGA_XC_B97M_V' : 254, # N. Mardirossian and M. Head-Gordon, J. Chem. Phys. 142, 074111 (2015)
'MGGA_XC_CC06' : 229, # A. C. Cancio and M. Y. Chou, Phys. Rev. B 74, 081202 (2006)
'MGGA_XC_HLE17' : 288, # P. Verma and D. G. Truhlar, J. Phys. Chem. C 121, 7144 (2017)
'MGGA_XC_LP90' : 564, # C. Lee and R. G. Parr, Phys. Rev. A 42, 193 (1990)
'MGGA_XC_OTPSS_D' : 64 , # L. Goerigk and S. Grimme, J. Chem. Theory Comput. 6, 107 (2010)
'MGGA_XC_TPSSLYP1W' : 242, # E. E. Dahlke and D. G. Truhlar, J. Phys. Chem. B 109, 15677 (2005)
'MGGA_XC_ZLP' : 42 , # Q. Zhao, M. Levy, and R. G. Parr, Phys. Rev. A 47, 918 (1993)
'HYB_MGGA_X_BMK' : 279, # A. D. Boese and J. M. L. Martin, J. Chem. Phys. 121, 3405 (2004)
'HYB_MGGA_X_DLDF' : 36 , # K. Pernal, R. Podeszwa, K. Patkowski, and K. Szalewicz, Phys. Rev. Lett. 103, 263201 (2009)
'HYB_MGGA_X_JS18' : 705, # S. Jana and P. Samal, Phys. Chem. Chem. Phys. 20, 8999 (2018)
'HYB_MGGA_X_M05' : 438, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Phys. 123, 161103 (2005)
'HYB_MGGA_X_M05_2X' : 439, # Y. Zhao, N. E. Schultz, and D. G. Truhlar, J. Chem. Theory Comput. 2, 364 (2006)
'HYB_MGGA_X_M06' : 449, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'HYB_MGGA_X_M06_2X' : 450, # Y. Zhao and D. G. Truhlar, Theor. Chem. Acc. 120, 215 (2008)
'HYB_MGGA_X_M06_HF' : 444, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 110, 13126 (2006)
'HYB_MGGA_X_M06_SX' : 310, # Y. Wang, P. Verma, L. Zhang, Y. Li, Z. Liu, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 117, 2294 (2020), https://www.pnas.org/content/117/5/2294.full.pdf
'HYB_MGGA_X_M08_HX' : 295, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'HYB_MGGA_X_M08_SO' : 296, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 4, 1849 (2008)
'HYB_MGGA_X_M11' : 297, # R. Peverati and D. G. Truhlar, J. Phys. Chem. Lett. 2, 2810 (2011)
'HYB_MGGA_X_MN12_SX' : 248, # R. Peverati and D. G. Truhlar, Phys. Chem. Chem. Phys. 14, 16187 (2012)
'HYB_MGGA_X_MN15' : 268, # H. S. Yu, X. He, S. L. Li, and D. G. Truhlar, Chem. Sci. 7, 5032 (2016)
'HYB_MGGA_X_MS2H' : 224, # J. Sun, R. Haunschild, B. Xiao, I. W. Bulik, G. E. Scuseria, and J. P. Perdew, J. Chem. Phys. 138, 044113 (2013)
'HYB_MGGA_X_MVSH' : 474, # J. Sun, J. P. Perdew, and A. Ruzsinszky, Proc. Natl. Acad. Sci. U. S. A. 112, 685 (2015)
'HYB_MGGA_X_PJS18' : 706, # B. Patra, S. Jana, and P. Samal, Phys. Chem. Chem. Phys. 20, 8991 (2018)
'HYB_MGGA_X_REVM06' : 305, # Y. Wang, P. Verma, X. Jin, D. G. Truhlar, and X. He, Proc. Natl. Acad. Sci. U. S. A. 115, 10257 (2018)
'HYB_MGGA_X_REVM11' : 304, # P. Verma, Y. Wang, S. Ghosh, X. He, and D. G. Truhlar, J. Phys. Chem. A 123, 2966 (2019)
'HYB_MGGA_X_REVSCAN0' : 583, # P. D. Mezei, G. I. Csonka, and M. Kallay, J. Chem. Theory Comput. 14, 2469 (2018)
'HYB_MGGA_X_SCAN0' : 264, # K. Hui and J.-D. Chai, J. Chem. Phys. 144, 044114 (2016)
'HYB_MGGA_X_TAU_HCTH' : 282, # A. D. Boese and N. C. Handy, J. Chem. Phys. 116, 9559 (2002)
'HYB_MGGA_XC_B0KCIS' : 563, # J. Toulouse, A. Savin, and C. Adamo, J. Chem. Phys. 117, 10465 (2002)
'HYB_MGGA_XC_B86B95' : 441, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_B88B95' : 440, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_B94_HYB' : 398, # A. D. Becke, Int. J. Quantum Chem. 52, 625 (1994)
'HYB_MGGA_XC_B98' : 598, # A. D. Becke, J. Chem. Phys. 109, 2092 (1998)
'HYB_MGGA_XC_BB1K' : 443, # Y. Zhao, B. J. Lynch, and D. G. Truhlar, J. Phys. Chem. A 108, 2715 (2004)
'HYB_MGGA_XC_EDMGGAH' : 695, # J. Tao, J. Chem. Phys. 116, 2335 (2002)
'HYB_MGGA_XC_MPW1B95' : 445, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_MPW1KCIS' : 566, # Y. Zhao, N. Gonzalez-Garcia, and D. G. Truhlar, J. Phys. Chem. A 109, 2012 (2005)
'HYB_MGGA_XC_MPWB1K' : 446, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_MPWKCIS1K' : 567, # Y. Zhao, N. Gonzalez-Garcia, and D. G. Truhlar, J. Phys. Chem. A 109, 2012 (2005)
'HYB_MGGA_XC_PBE1KCIS' : 568, # Y. Zhao and D. G. Truhlar, J. Chem. Theory Comput. 1, 415 (2005)
'HYB_MGGA_XC_PW6B95' : 451, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 109, 5656 (2005)
'HYB_MGGA_XC_PW86B95' : 442, # A. D. Becke, J. Chem. Phys. 104, 1040 (1996)
'HYB_MGGA_XC_PWB6K' : 452, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 109, 5656 (2005)
'HYB_MGGA_XC_REVTPSSH' : 458, # G. I. Csonka, J. P. Perdew, and A. Ruzsinszky, J. Chem. Theory Comput. 6, 3688 (2010)
'HYB_MGGA_XC_TPSS0' : 396, # S. Grimme, J. Phys. Chem. A 109, 3067 (2005)
'HYB_MGGA_XC_TPSS1KCIS' : 569, # Y. Zhao, B. J. Lynch, and D. G. Truhlar, Phys. Chem. Chem. Phys. 7, 43 (2005)
'HYB_MGGA_XC_TPSSH' : 457, # V. N. Staroverov, G. E. Scuseria, J. Tao, and J. P. Perdew, J. Chem. Phys. 119, 12129 (2003)
'HYB_MGGA_XC_WB97M_V' : 531, # N. Mardirossian and M. Head-Gordon, J. Chem. Phys. 144, 214110 (2016)
'HYB_MGGA_XC_X1B95' : 447, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
'HYB_MGGA_XC_XB1K' : 448, # Y. Zhao and D. G. Truhlar, J. Phys. Chem. A 108, 6908 (2004)
}
#PROBLEMATIC_XC = dict([(XC_CODES[x], x) for x in
# ('GGA_C_SPBE', 'MGGA_X_REVTPSS')])
PROBLEMATIC_XC = {}
def _xc_key_without_underscore(xc_keys):
    '''Generate extra (key, libxc-ID) pairs in which underscores and hyphens
    in the functional name (the part after the family tag) are stripped, so
    that e.g. both GGA_X_PBE_SOL and GGA_X_PBESOL resolve to the same ID.'''
    new_xc = []
    for key, xc_id in xc_keys.items():
        for delimiter in ('_XC_', '_X_', '_C_', '_K_'):
            if delimiter in key:
                key0, key1 = key.split(delimiter)
                new_key1 = key1.replace('_', '').replace('-', '')
                if key1 != new_key1:
                    new_xc.append((key0+delimiter+new_key1, xc_id))
                break
    return new_xc
XC_CODES.update(_xc_key_without_underscore(XC_CODES))
del _xc_key_without_underscore
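# A sketch of what the helper above registers (values taken from the table
# above): every functional key gains an underscore-free spelling for the part
# after the family tag, so e.g.
#     _xc_key_without_underscore({'GGA_X_PBE_SOL': 116})
# yields [('GGA_X_PBESOL', 116)], and both XC_CODES['GGA_X_PBE_SOL'] and
# XC_CODES['GGA_X_PBESOL'] resolve to the same libxc ID.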
#
# alias
#
XC_CODES.update({
'GGA_C_BCGP' : 'GGA_C_ACGGA',
'LDA' : 1 ,
'SLATER' : 1 ,
'VWN3' : 8,
'VWNRPA' : 8,
'VWN5' : 7,
'B88' : 106,
'PBE0' : 406,
'PBE1PBE' : 406,
'OPTXCORR' : '0.7344536875999693*SLATER - 0.6984752285760186*OPTX,',
'B3LYP' : 'B3LYP5', # VWN5 version
'B3LYP5' : '.2*HF + .08*SLATER + .72*B88, .81*LYP + .19*VWN',
'B3LYPG' : 402, # VWN3, used by Gaussian
'B3P86' : 'B3P865', # VWN5 version
'B3P865' : '.2*HF + .08*SLATER + .72*B88, .81*P86 + .19*VWN',
# FIXME: Check if Gaussian takes a different form for B3P86
#'B3P86G' : 403, # VWN3, used by Gaussian
'B3P86G' : '.2*HF + .08*SLATER + .72*B88, .81*P86 + .19*VWN3',
'B3PW91' : 'B3PW915',
'B3PW915' : '.2*HF + .08*SLATER + .72*B88, .81*PW91 + .19*VWN',
#'B3PW91G' : '.2*HF + .08*SLATER + .72*B88, .81*PW91 + .19*VWN3',
'B3PW91G' : 401,
#'O3LYP5' : '.1161*HF + .9262*SLATER + .8133*OPTXCORR, .81*LYP + .19*VWN5',
#'O3LYPG' : '.1161*HF + .9262*SLATER + .8133*OPTXCORR, .81*LYP + .19*VWN3',
    'O3LYP' : 404, # in libxc == '.1161*HF + 0.071006917*SLATER + .8133*OPTX, .81*LYP + .19*VWN5', may be erroneous
'MPW3PW' : 'MPW3PW5', # VWN5 version
'MPW3PW5' : '.2*HF + .08*SLATER + .72*MPW91, .81*PW91 + .19*VWN',
'MPW3PWG' : 415, # VWN3, used by Gaussian
'MPW3LYP' : 'MPW3LYP5', # VWN5 version
'MPW3LYP5' : '.218*HF + .073*SLATER + .709*MPW91, .871*LYP + .129*VWN',
'MPW3LYPG' : 419, # VWN3, used by Gaussian
'REVB3LYP' : 'REVB3LYP5', # VWN5 version
'REVB3LYP5' : '.2*HF + .13*SLATER + .67*B88, .84*LYP + .16*VWN',
'REVB3LYPG' : 454, # VWN3, used by Gaussian
'X3LYP' : 'X3LYP5', # VWN5 version
'X3LYP5' : '.218*HF + .073*SLATER + .542385*B88 + .166615*PW91, .871*LYP + .129*VWN',
'X3LYPG' : 411, # VWN3, used by Gaussian
'CAMB3LYP' : 'HYB_GGA_XC_CAM_B3LYP',
'CAMYBLYP' : 'HYB_GGA_XC_CAMY_BLYP',
'CAMYB3LYP' : 'HYB_GGA_XC_CAMY_B3LYP',
'B5050LYP' : '.5*HF + .08*SLATER + .42*B88, .81*LYP + .19*VWN',
'MPW1LYP' : '.25*HF + .75*MPW91, LYP',
'MPW1PBE' : '.25*HF + .75*MPW91, PBE',
'PBE50' : '.5*HF + .5*PBE, PBE',
'REVPBE0' : '.25*HF + .75*PBE_R, PBE',
'B1B95' : 440,
'TPSS0' : '.25*HF + .75*TPSS, TPSS',
}) # noqa: E501
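# How these aliases resolve, as a sketch: integer entries map directly to a
# libxc functional ID (e.g. 'B3LYPG' -> 402), while string entries are
# re-parsed as compound XC descriptions, so 'B3LYP' -> 'B3LYP5' ->
# '.2*HF + .08*SLATER + .72*B88, .81*LYP + .19*VWN', i.e. 20% exact exchange
# plus a Slater/B88 exchange mixture and a LYP/VWN correlation mixture.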
XC_KEYS = set(XC_CODES.keys())
# Some XC functionals have a conventional name: e.g. M06-L means the M06-L X
# functional plus the M06-L C functional, and PBE means PBE-X plus PBE-C. If
# such a conventional name were placed in XC_CODES, it could lead to a
# recursive reference when parsing the xc description. These names (as
# exceptions to XC_CODES) are listed in XC_ALIAS below and should be treated
# as shortcuts for the full XC functional.
XC_ALIAS = {
# Conventional name : name in XC_CODES
'BLYP' : 'B88,LYP',
'BP86' : 'B88,P86',
'PW91' : 'PW91,PW91',
'PBE' : 'PBE,PBE',
'REVPBE' : 'PBE_R,PBE',
'PBESOL' : 'PBE_SOL,PBE_SOL',
'PKZB' : 'PKZB,PKZB',
'TPSS' : 'TPSS,TPSS',
'REVTPSS' : 'REVTPSS,REVTPSS',
'SCAN' : 'SCAN,SCAN',
'RSCAN' : 'RSCAN,RSCAN',
'R2SCAN' : 'R2SCAN,R2SCAN',
'SCANL' : 'SCANL,SCANL',
'R2SCANL' : 'R2SCANL,R2SCANL',
'SOGGA' : 'SOGGA,PBE',
'BLOC' : 'BLOC,TPSSLOC',
'OLYP' : 'OPTX,LYP',
'OPBE' : 'OPTX,PBE',
'RPBE' : 'RPBE,PBE',
'BPBE' : 'B88,PBE',
'MPW91' : 'MPW91,PW91',
'HFLYP' : 'HF,LYP',
'HFPW92' : 'HF,PW_MOD',
'SPW92' : 'SLATER,PW_MOD',
'SVWN' : 'SLATER,VWN',
'MS0' : 'MS0,REGTPSS',
'MS1' : 'MS1,REGTPSS',
'MS2' : 'MS2,REGTPSS',
'MS2H' : 'MS2H,REGTPSS',
'MVS' : 'MVS,REGTPSS',
'MVSH' : 'MVSH,REGTPSS',
'SOGGA11' : 'SOGGA11,SOGGA11',
'SOGGA11_X' : 'SOGGA11_X,SOGGA11_X',
'KT1' : 'KT1,VWN',
'KT2' : 'GGA_XC_KT2',
'KT3' : 'GGA_XC_KT3',
'DLDF' : 'DLDF,DLDF',
'GAM' : 'GAM,GAM',
'M06_L' : 'M06_L,M06_L',
'M06_SX' : 'M06_SX,M06_SX',
'M11_L' : 'M11_L,M11_L',
'MN12_L' : 'MN12_L,MN12_L',
'MN15_L' : 'MN15_L,MN15_L',
'N12' : 'N12,N12',
'N12_SX' : 'N12_SX,N12_SX',
'MN12_SX' : 'MN12_SX,MN12_SX',
'MN15' : 'MN15,MN15',
'MBEEF' : 'MBEEF,PBE_SOL',
'SCAN0' : 'SCAN0,SCAN',
'PBEOP' : 'PBE,OP_PBE',
'BOP' : 'B88,OP_B88',
# new in libxc-4.2.3
'REVSCAN' : 'MGGA_X_REVSCAN,MGGA_C_REVSCAN',
'REVSCAN_VV10' : 'MGGA_X_REVSCAN,MGGA_C_REVSCAN_VV10',
'SCAN_VV10' : 'MGGA_X_SCAN,MGGA_C_SCAN_VV10',
'SCAN_RVV10' : 'MGGA_X_SCAN,MGGA_C_SCAN_RVV10',
'M05' : 'HYB_MGGA_X_M05,MGGA_C_M05',
'M06' : 'HYB_MGGA_X_M06,MGGA_C_M06',
'M05_2X' : 'HYB_MGGA_X_M05_2X,MGGA_C_M05_2X',
'M06_2X' : 'HYB_MGGA_X_M06_2X,MGGA_C_M06_2X',
# extra aliases
'SOGGA11X' : 'SOGGA11_X',
'M06L' : 'M06_L',
'M11L' : 'M11_L',
'MN12L' : 'MN12_L',
'MN15L' : 'MN15_L',
'N12SX' : 'N12_SX',
'MN12SX' : 'MN12_SX',
'M052X' : 'M05_2X',
'M062X' : 'M06_2X',
} # noqa: E122
XC_ALIAS.update([(key.replace('-',''), XC_ALIAS[key])
for key in XC_ALIAS if '-' in key])
VV10_XC = set(('B97M_V', 'WB97M_V', 'WB97X_V', 'VV10', 'LC_VV10',
'REVSCAN_VV10',
'SCAN_VV10', 'SCAN_RVV10', 'SCANL_VV10', 'SCANL_RVV10'))
VV10_XC = VV10_XC.union(set([x.replace('_', '') for x in VV10_XC]))
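# Membership in VV10_XC flags a functional name as carrying a VV10-type
# nonlocal correlation part; both spellings are covered by the union above,
# e.g.
#     'WB97M_V' in VV10_XC and 'WB97MV' in VV10_XC   # -> True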
def xc_reference(xc_code):
    '''Returns the literature references for the individual XC functional components'''
hyb, fn_facs = parse_xc(xc_code)
refs = []
c_refs = (ctypes.c_char_p * 8)()
for xid, fac in fn_facs:
_itrf.LIBXC_xc_reference(xid, c_refs)
for ref in c_refs:
if ref:
refs.append(ref.decode("UTF-8"))
return refs
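# Illustrative usage (a sketch; the exact strings depend on the linked libxc
# version):
#     >>> xc_reference('pbe,pbe')
#     ['J. P. Perdew, K. Burke, and M. Ernzerhof, Phys. Rev. Lett. 77, 3865 (1996)', ...]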
def xc_type(xc_code):
if xc_code is None:
return None
elif isinstance(xc_code, str):
if is_nlc(xc_code):
return 'NLC'
hyb, fn_facs = parse_xc(xc_code)
else:
fn_facs = [(xc_code, 1)] # mimic fn_facs
if not fn_facs:
return 'HF'
elif all(_itrf.LIBXC_is_lda(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'LDA'
elif any(_itrf.LIBXC_is_meta_gga(ctypes.c_int(xid)) for xid, fac in fn_facs):
return 'MGGA'
else:
# any(_itrf.LIBXC_is_gga(ctypes.c_int(xid)) for xid, fac in fn_facs)
# include hybrid_xc
return 'GGA'
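# Illustrative usage (a sketch):
#     >>> xc_type('lda,vwn')
#     'LDA'
#     >>> xc_type('b3lyp')    # hybrid GGAs are classified as 'GGA'
#     'GGA'
#     >>> xc_type('tpss,tpss')
#     'MGGA'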
def is_lda(xc_code):
return xc_type(xc_code) == 'LDA'
def is_hybrid_xc(xc_code):
if xc_code is None:
return False
elif isinstance(xc_code, str):
if xc_code.isdigit():
return _itrf.LIBXC_is_hybrid(ctypes.c_int(int(xc_code)))
else:
if 'HF' in xc_code:
return True
if hybrid_coeff(xc_code) != 0:
return True
if rsh_coeff(xc_code) != [0, 0, 0]:
return True
return False
elif isinstance(xc_code, int):
return _itrf.LIBXC_is_hybrid(ctypes.c_int(xc_code))
else:
return any((is_hybrid_xc(x) for x in xc_code))
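# Illustrative usage (a sketch):
#     >>> is_hybrid_xc('pbe,pbe')
#     False
#     >>> is_hybrid_xc('b3lyp')   # carries 20% exact exchange
#     True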
def is_meta_gga(xc_code):
return xc_type(xc_code) == 'MGGA'
def is_gga(xc_code):
return xc_type(xc_code) == 'GGA'
def needs_laplacian(xc_code):
return _itrf.LIBXC_needs_laplacian(xc_code) != 0
def is_nlc(xc_code):
return '__VV10' in xc_code.upper()
def max_deriv_order(xc_code):
hyb, fn_facs = parse_xc(xc_code)
if fn_facs:
return min(_itrf.LIBXC_max_deriv_order(ctypes.c_int(xid)) for xid, fac in fn_facs)
else:
return 3
def test_deriv_order(xc_code, deriv, raise_error=False):
support = deriv <= max_deriv_order(xc_code)
if not support and raise_error:
from pyscf.dft import xcfun
msg = ('libxc library does not support derivative order %d for %s' %
(deriv, xc_code))
try:
if xcfun.test_deriv_order(xc_code, deriv, raise_error=False):
msg += ('''
This functional derivative is supported in the xcfun library.
The following code can be used to change the libxc library to xcfun library:
from pyscf.dft import xcfun
mf._numint.libxc = xcfun
''')
raise NotImplementedError(msg)
except KeyError as e:
sys.stderr.write('\n'+msg+'\n')
sys.stderr.write('%s not found in xcfun library\n\n' % xc_code)
raise e
return support
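# Illustrative usage (a sketch; libxc supports at least third derivatives for
# most common functionals):
#     >>> test_deriv_order('b3lyp', 2)
#     True
# With raise_error=True an unsupported order raises NotImplementedError,
# including a hint when the xcfun backend could be used instead.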
def hybrid_coeff(xc_code, spin=0):
    '''Return the fraction of exact (HF) exchange mixed into the functional.
    Recursively defined hybrid functionals are supported.
    '''
hyb, fn_facs = parse_xc(xc_code)
for xid, fac in fn_facs:
hyb[0] += fac * _itrf.LIBXC_hybrid_coeff(ctypes.c_int(xid))
return hyb[0]
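# Illustrative usage (a sketch; 0.2 is the B3LYP exact-exchange fraction
# encoded in the alias table above):
#     >>> hybrid_coeff('b3lyp')
#     0.2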
def nlc_coeff(xc_code):
'''Get NLC coefficients
'''
nlc_code = None
if isinstance(xc_code, str) and '__VV10' in xc_code.upper():
xc_code, nlc_code = xc_code.upper().split('__', 1)
hyb, fn_facs = parse_xc(xc_code)
nlc_pars = [0, 0]
nlc_tmp = (ctypes.c_double*2)()
for xid, fac in fn_facs:
_itrf.LIBXC_nlc_coeff(xid, nlc_tmp)
nlc_pars[0] += nlc_tmp[0]
nlc_pars[1] += nlc_tmp[1]
if nlc_pars[0] == 0 and nlc_pars[1] == 0:
if nlc_code is not None:
# Use VV10 NLC parameters by default for the general case
_itrf.LIBXC_nlc_coeff(XC_CODES['GGA_XC_' + nlc_code], nlc_tmp)
nlc_pars[0] += nlc_tmp[0]
nlc_pars[1] += nlc_tmp[1]
else:
raise NotImplementedError(
'%s does not have NLC part. Available functionals are %s' %
                (xc_code, ', '.join(sorted(VV10_XC))))
return nlc_pars
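# Illustrative usage (a sketch; b = 6.0 and C = 0.01 are the published VV10
# parameters of wB97M-V):
#     >>> nlc_coeff('wb97m_v')
#     [6.0, 0.01]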
def rsh_coeff(xc_code):
    '''Range-separated parameter and HF exchange components: omega, alpha, beta

    Exc_RSH = c_LR * LR_HFX + c_SR * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
            = alpha * HFX + beta * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec
            = alpha * LR_HFX + hyb * SR_HFX + (1-c_SR) * Ex_SR + (1-c_LR) * Ex_LR + Ec

    SR_HFX = < pi | e^{-omega r_{12}}/r_{12} | iq >
    LR_HFX = < pi | (1 - e^{-omega r_{12}})/r_{12} | iq >
    alpha = c_LR
    beta = c_SR - c_LR = hyb - alpha
    '''
if xc_code is None:
return 0, 0, 0
check_omega = True
if isinstance(xc_code, str) and ',' in xc_code:
# Parse only X part for the RSH coefficients. This is to handle
# exceptions for C functionals such as M11.
xc_code = format_xc_code(xc_code)
xc_code = xc_code.split(',')[0] + ','
if 'SR_HF' in xc_code or 'LR_HF' in xc_code or 'RSH(' in xc_code:
check_omega = False
hyb, fn_facs = parse_xc(xc_code)
hyb, alpha, omega = hyb
beta = hyb - alpha
rsh_pars = [omega, alpha, beta]
rsh_tmp = (ctypes.c_double*3)()
for xid, fac in fn_facs:
_itrf.LIBXC_rsh_coeff(xid, rsh_tmp)
if rsh_pars[0] == 0:
rsh_pars[0] = rsh_tmp[0]
elif check_omega:
# Check functional is actually a CAM functional
if rsh_tmp[0] != 0 and not _itrf.LIBXC_is_cam_rsh(ctypes.c_int(xid)):
raise KeyError('Libxc functional %i employs a range separation '
'kernel that is not supported in PySCF' % xid)
# Check omega
if (rsh_tmp[0] != 0 and rsh_pars[0] != rsh_tmp[0]):
raise ValueError('Different values of omega found for RSH functionals')
rsh_pars[1] += rsh_tmp[1] * fac
rsh_pars[2] += rsh_tmp[2] * fac
return rsh_pars
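# Hedged illustration of rsh_coeff (the CAM-B3LYP numbers below are quoted from
# its usual parametrization and are an assumption, not a test reference):
#   omega, alpha, beta = rsh_coeff('camb3lyp')
#   # omega ~ 0.33, alpha ~ 0.65, beta ~ -0.46, so the SR-HF fraction is ~0.19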
def parse_xc_name(xc_name='LDA,VWN'):
'''Convert the XC functional name to libxc library internal ID.
'''
fn_facs = parse_xc(xc_name)[1]
return fn_facs[0][0], fn_facs[1][0]
def parse_xc(description):
r'''Rules to input functional description:
* The given functional description must be a one-line string.
* The functional description is case-insensitive.
* The functional description string has two parts, separated by ",". The
first part describes the exchange functional, the second is the correlation
functional.
- If "," was not in string, the entire string is considered as a
compound XC functional (including both X and C functionals, such as b3lyp).
- To input only X functional (without C functional), leave the second
part blank. E.g. description='slater,' means pure LDA functional.
- To neglect X functional (just apply C functional), leave the first
part blank. E.g. description=',vwn' means pure VWN functional.
      - If a compound XC functional is specified, no matter whether it is in the
X part (the string in front of comma) or the C part (the string behind
comma), both X and C functionals of the compound XC functional will be
used.
    * The functional names can be placed in arbitrary order. Two names need to
      be separated by operators "+" or "-". Blank spaces are ignored.
      NOTE the parser only reads operators "+" "-" "*". / is not supported.
* A functional name can have at most one factor. If the factor is not
given, it is set to 1. Compound functional can be scaled as a unit. For
example '0.5*b3lyp' is equivalent to
'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
* String "HF" stands for exact exchange (HF K matrix). Putting "HF" in
      the correlation functional part is the same as putting "HF" in the
      exchange part.
* String "RSH" means range-separated operator. Its format is
RSH(omega, alpha, beta). Another way to input RSH is to use keywords
SR_HF and LR_HF: "SR_HF(0.1) * alpha_plus_beta" and "LR_HF(0.1) *
alpha" where the number in parenthesis is the value of omega.
* Be careful with the libxc convention on GGA functional, in which the LDA
contribution has been included.
Args:
xc_code : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
If "HF" was appeared in the string, it stands for the exact exchange.
rho : ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
Kwargs:
spin : int
spin polarized if spin > 0
relativity : int
No effects.
verbose : int or object of :class:`Logger`
No effects.
Returns:
ex, vxc, fxc, kxc
where
* vxc = (vrho, vsigma, vlapl, vtau) for restricted case
* vxc for unrestricted case
| vrho[:,2] = (u, d)
| vsigma[:,3] = (uu, ud, dd)
| vlapl[:,2] = (u, d)
| vtau[:,2] = (u, d)
* fxc for restricted case:
(v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)
* fxc for unrestricted case:
| v2rho2[:,3] = (u_u, u_d, d_d)
| v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
| v2sigma2[:,6] = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
| v2lapl2[:,3]
| vtau2[:,3]
| v2rholapl[:,4]
| v2rhotau[:,4]
| v2lapltau[:,4]
| v2sigmalapl[:,6]
| v2sigmatau[:,6]
* kxc for restricted case:
v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3,
v3rho2tau, v3rhosigmatau, v3rhotau2, v3sigma2tau, v3sigmatau2, v3tau3
* kxc for unrestricted case:
| v3rho3[:,4] = (u_u_u, u_u_d, u_d_d, d_d_d)
| v3rho2sigma[:,9] = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
| v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
| v3sigma3[:,10] = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)
| v3rho2tau
| v3rhosigmatau
| v3rhotau2
| v3sigma2tau
| v3sigmatau2
| v3tau3
see also libxc_itrf.c
''' # noqa: E501
hyb = [0, 0, 0] # hybrid, alpha, omega (== SR_HF, LR_HF, omega)
if description is None:
return hyb, []
elif isinstance(description, int):
return hyb, [(description, 1.)]
elif not isinstance(description, str): #isinstance(description, (tuple,list)):
return parse_xc('%s,%s' % tuple(description))
def assign_omega(omega, hyb_or_sr, lr=0):
if hyb[2] == omega or omega == 0:
hyb[0] += hyb_or_sr
hyb[1] += lr
elif hyb[2] == 0:
hyb[0] += hyb_or_sr
hyb[1] += lr
hyb[2] = omega
else:
raise ValueError('Different values of omega found for RSH functionals')
fn_facs = []
def parse_token(token, ftype, search_xc_alias=False):
if token:
if token[0] == '-':
sign = -1
token = token[1:]
else:
sign = 1
if '*' in token:
fac, key = token.split('*')
if fac[0].isalpha():
fac, key = key, fac
fac = sign * float(fac)
else:
fac, key = sign, token
if key[:3] == 'RSH':
# RSH(alpha; beta; omega): Range-separated-hybrid functional
# See also utils.format_xc_code
alpha, beta, omega = [float(x) for x in key[4:-1].split(';')]
assign_omega(omega, fac*(alpha+beta), fac*alpha)
elif key == 'HF':
hyb[0] += fac
hyb[1] += fac # also add to LR_HF
elif 'SR_HF' in key:
if '(' in key:
omega = float(key.split('(')[1].split(')')[0])
assign_omega(omega, fac, 0)
                else:  # Assume this omega is the same as the existing omega
hyb[0] += fac
elif 'LR_HF' in key:
if '(' in key:
omega = float(key.split('(')[1].split(')')[0])
assign_omega(omega, 0, fac)
else:
hyb[1] += fac # == alpha
elif key.isdigit():
fn_facs.append((int(key), fac))
else:
if search_xc_alias and key in XC_ALIAS:
x_id = XC_ALIAS[key]
elif key in XC_CODES:
x_id = XC_CODES[key]
else:
possible_xc_for = fpossible_dic[ftype]
possible_xc = XC_KEYS.intersection(possible_xc_for(key))
if possible_xc:
if len(possible_xc) > 1:
sys.stderr.write('Possible xc_code %s matches %s. '
% (list(possible_xc), key))
for x_id in possible_xc: # Prefer X functional
if '_X_' in x_id:
break
else:
x_id = possible_xc.pop()
sys.stderr.write('XC parser takes %s\n' % x_id)
sys.stderr.write('You can add prefix to %s for a '
'specific functional (e.g. X_%s, '
'HYB_MGGA_X_%s)\n'
% (key, key, key))
else:
x_id = possible_xc.pop()
x_id = XC_CODES[x_id]
else:
raise KeyError('Unknown %s functional %s' % (ftype, key))
if isinstance(x_id, str):
hyb1, fn_facs1 = parse_xc(x_id)
# Recursively scale the composed functional, to support e.g. '0.5*b3lyp'
if hyb1[0] != 0 or hyb1[1] != 0:
assign_omega(hyb1[2], hyb1[0]*fac, hyb1[1]*fac)
fn_facs.extend([(xid, c*fac) for xid, c in fn_facs1])
elif x_id is None:
raise NotImplementedError('%s functional %s' % (ftype, key))
else:
fn_facs.append((x_id, fac))
def possible_x_for(key):
return set((key,
'LDA_X_'+key, 'GGA_X_'+key, 'MGGA_X_'+key,
'HYB_GGA_X_'+key, 'HYB_MGGA_X_'+key))
def possible_xc_for(key):
return set((key, 'LDA_XC_'+key, 'GGA_XC_'+key, 'MGGA_XC_'+key,
'HYB_GGA_XC_'+key, 'HYB_MGGA_XC_'+key))
def possible_k_for(key):
return set((key,
'LDA_K_'+key, 'GGA_K_'+key,))
def possible_x_k_for(key):
return possible_x_for(key).union(possible_k_for(key))
def possible_c_for(key):
return set((key,
'LDA_C_'+key, 'GGA_C_'+key, 'MGGA_C_'+key))
fpossible_dic = {'X': possible_x_for,
'C': possible_c_for,
'compound XC': possible_xc_for,
'K': possible_k_for,
'X or K': possible_x_k_for}
description = format_xc_code(description)
if '-' in description: # To handle e.g. M06-L
for key in _NAME_WITH_DASH:
if key in description:
description = description.replace(key, _NAME_WITH_DASH[key])
if ',' in description:
x_code, c_code = description.split(',')
for token in x_code.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'X or K')
for token in c_code.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'C')
else:
for token in description.replace('-', '+-').replace(';+', ';').split('+'):
parse_token(token, 'compound XC', search_xc_alias=True)
if hyb[2] == 0: # No omega is assigned. LR_HF is 0 for normal Coulomb operator
hyb[1] = 0
return hyb, remove_dup(fn_facs)
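# Hedged usage sketch of parse_xc (the factors are illustrative):
#   hyb, fn_facs = parse_xc('0.2*HF + 0.08*LDA + 0.72*B88, 0.81*LYP + 0.19*VWN')
#   # hyb == [0.2, 0.0, 0] (SR_HF, LR_HF, omega); fn_facs pairs libxc IDs with factors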
_NAME_WITH_DASH = {'SR-HF' : 'SR_HF',
'LR-HF' : 'LR_HF',
'OTPSS-D' : 'OTPSS_D',
'B97-1' : 'B97_1',
'B97-2' : 'B97_2',
'B97-3' : 'B97_3',
'B97-K' : 'B97_K',
'B97-D' : 'B97_D',
'HCTH-93' : 'HCTH_93',
'HCTH-120' : 'HCTH_120',
'HCTH-147' : 'HCTH_147',
'HCTH-407' : 'HCTH_407',
'WB97X-D' : 'WB97X_D',
'WB97X-V' : 'WB97X_V',
'WB97M-V' : 'WB97M_V',
'B97M-V' : 'B97M_V',
'M05-2X' : 'M05_2X',
'M06-L' : 'M06_L',
'M06-HF' : 'M06_HF',
'M06-2X' : 'M06_2X',
'M08-HX' : 'M08_HX',
'M08-SO' : 'M08_SO',
'M11-L' : 'M11_L',
'MN12-L' : 'MN12_L',
'MN15-L' : 'MN15_L',
'MN12-SX' : 'MN12_SX',
'N12-SX' : 'N12_SX',
'LRC-WPBE' : 'LRC_WPBE',
'LRC-WPBEH': 'LRC_WPBEH',
'LC-VV10' : 'LC_VV10',
'CAM-B3LYP': 'CAM_B3LYP'}
def eval_xc(xc_code, rho, spin=0, relativity=0, deriv=1, omega=None, verbose=None):
r'''Interface to call libxc library to evaluate XC functional, potential
and functional derivatives.
* The given functional xc_code must be a one-line string.
* The functional xc_code is case-insensitive.
* The functional xc_code string has two parts, separated by ",". The
first part describes the exchange functional, the second part sets the
correlation functional.
- If "," not appeared in string, the entire string is treated as the
name of a compound functional (containing both the exchange and
the correlation functional) which was declared in the functional
aliases list. The full list of functional aliases can be obtained by
calling the function pyscf.dft.xcfun.XC_ALIAS.keys() .
If the string was not found in the aliased functional list, it is
treated as X functional.
- To input only X functional (without C functional), leave the second
part blank. E.g. description='slater,' means a functional with LDA
contribution only.
- To neglect the contribution of X functional (just apply C functional),
leave blank in the first part, e.g. description=',vwn' means a
functional with VWN only.
- If compound XC functional is specified, no matter whether it is in the
X part (the string in front of comma) or the C part (the string behind
comma), both X and C functionals of the compound XC functional will be
used.
* The functional name can be placed in arbitrary order. Two names need to
be separated by operators "+" or "-". Blank spaces are ignored.
NOTE the parser only reads operators "+" "-" "*". / is not supported.
* A functional name can have at most one factor. If the factor is not
given, it is set to 1. Compound functional can be scaled as a unit. For
example '0.5*b3lyp' is equivalent to
'HF*0.1 + .04*LDA + .36*B88, .405*LYP + .095*VWN'
* String "HF" stands for exact exchange (HF K matrix). "HF" can be put in
the correlation functional part (after comma). Putting "HF" in the
      correlation part is the same as putting "HF" in the exchange part.
* String "RSH" means range-separated operator. Its format is
RSH(omega, alpha, beta). Another way to input RSH is to use keywords
SR_HF and LR_HF: "SR_HF(0.1) * alpha_plus_beta" and "LR_HF(0.1) *
alpha" where the number in parenthesis is the value of omega.
* Be careful with the libxc convention of GGA functional, in which the LDA
contribution is included.
Args:
xc_code : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
If "HF" (exact exchange) is appeared in the string, the HF part will
be skipped. If an empty string "" is given, the returns exc, vxc,...
will be vectors of zeros.
rho : ndarray
Shape of ((*,N)) for electron density (and derivatives) if spin = 0;
Shape of ((*,N),(*,N)) for alpha/beta electron density (and derivatives) if spin > 0;
where N is number of grids.
rho (*,N) are ordered as (den,grad_x,grad_y,grad_z,laplacian,tau)
where grad_x = d/dx den, laplacian = \nabla^2 den, tau = 1/2(\nabla f)^2
In spin unrestricted case,
rho is ((den_u,grad_xu,grad_yu,grad_zu,laplacian_u,tau_u)
(den_d,grad_xd,grad_yd,grad_zd,laplacian_d,tau_d))
Kwargs:
spin : int
spin polarized if spin > 0
relativity : int
No effects.
verbose : int or object of :class:`Logger`
No effects.
Returns:
ex, vxc, fxc, kxc
where
* vxc = (vrho, vsigma, vlapl, vtau) for restricted case
* vxc for unrestricted case
| vrho[:,2] = (u, d)
| vsigma[:,3] = (uu, ud, dd)
| vlapl[:,2] = (u, d)
| vtau[:,2] = (u, d)
* fxc for restricted case:
(v2rho2, v2rhosigma, v2sigma2, v2lapl2, vtau2, v2rholapl, v2rhotau, v2lapltau, v2sigmalapl, v2sigmatau)
* fxc for unrestricted case:
| v2rho2[:,3] = (u_u, u_d, d_d)
| v2rhosigma[:,6] = (u_uu, u_ud, u_dd, d_uu, d_ud, d_dd)
| v2sigma2[:,6] = (uu_uu, uu_ud, uu_dd, ud_ud, ud_dd, dd_dd)
| v2lapl2[:,3]
| vtau2[:,3]
| v2rholapl[:,4]
| v2rhotau[:,4]
| v2lapltau[:,4]
| v2sigmalapl[:,6]
| v2sigmatau[:,6]
* kxc for restricted case:
(v3rho3, v3rho2sigma, v3rhosigma2, v3sigma3)
* kxc for unrestricted case:
| v3rho3[:,4] = (u_u_u, u_u_d, u_d_d, d_d_d)
| v3rho2sigma[:,9] = (u_u_uu, u_u_ud, u_u_dd, u_d_uu, u_d_ud, u_d_dd, d_d_uu, d_d_ud, d_d_dd)
| v3rhosigma2[:,12] = (u_uu_uu, u_uu_ud, u_uu_dd, u_ud_ud, u_ud_dd, u_dd_dd, d_uu_uu, d_uu_ud, d_uu_dd, d_ud_ud, d_ud_dd, d_dd_dd)
| v3sigma3[:,10] = (uu_uu_uu, uu_uu_ud, uu_uu_dd, uu_ud_ud, uu_ud_dd, uu_dd_dd, ud_ud_ud, ud_ud_dd, ud_dd_dd, dd_dd_dd)
see also libxc_itrf.c
''' # noqa: E501
hyb, fn_facs = parse_xc(xc_code)
if omega is not None:
hyb[2] = float(omega)
return _eval_xc(hyb, fn_facs, rho, spin, relativity, deriv, verbose)
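# Hedged sketch of a spin-restricted LDA evaluation (grid values are made up):
#   rho = numpy.abs(numpy.random.rand(10))   # density on 10 grid points
#   exc, vxc, fxc, kxc = eval_xc('LDA,VWN', rho, spin=0, deriv=1)
#   # exc.shape == (10,); vxc[0] is vrho and the other vxc slots are None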
def _eval_xc(hyb, fn_facs, rho, spin=0, relativity=0, deriv=1, verbose=None):
assert(deriv <= 3)
if spin == 0:
nspin = 1
rho_u = rho_d = numpy.asarray(rho, order='C')
else:
nspin = 2
rho_u = numpy.asarray(rho[0], order='C')
rho_d = numpy.asarray(rho[1], order='C')
assert(rho_u.dtype == numpy.double)
assert(rho_d.dtype == numpy.double)
if rho_u.ndim == 1:
rho_u = rho_u.reshape(1,-1)
rho_d = rho_d.reshape(1,-1)
ngrids = rho_u.shape[1]
fn_ids = [x[0] for x in fn_facs]
facs = [x[1] for x in fn_facs]
if hyb[2] != 0:
# Current implementation does not support different omegas for
# different RSH functionals if there are multiple RSHs
omega = [hyb[2]] * len(facs)
else:
omega = [0] * len(facs)
fn_ids_set = set(fn_ids)
if fn_ids_set.intersection(PROBLEMATIC_XC):
problem_xc = [PROBLEMATIC_XC[k]
for k in fn_ids_set.intersection(PROBLEMATIC_XC)]
warnings.warn('Libxc functionals %s may have discrepancy to xcfun '
'library.\n' % problem_xc)
if any([needs_laplacian(fid) for fid in fn_ids]):
raise NotImplementedError('laplacian in meta-GGA method')
n = len(fn_ids)
if (n == 0 or # xc_code = '' or xc_code = 'HF', an empty functional
all((is_lda(x) for x in fn_ids))):
if spin == 0:
nvar = 1
else:
nvar = 2
elif any((is_meta_gga(x) for x in fn_ids)):
if spin == 0:
nvar = 4
else:
nvar = 9
else: # GGA
if spin == 0:
nvar = 2
else:
nvar = 5
    # Check that the density rho has the appropriate shape.
    # Should this be >= or ==?  In test_xcfun.py (test_vs_libxc_rks) the
    # density contains 6 rows regardless of the functional.
if nvar == 1 or (nvar ==2 and spin > 0): # LDA
for rho_ud in [rho_u, rho_d]:
assert rho_ud.shape[0] >= 1
elif nvar == 2 or nvar == 5: # GGA
for rho_ud in [rho_u, rho_d]:
assert rho_ud.shape[0] >= 4
elif nvar == 4 or nvar == 9: # MGGA
for rho_ud in [rho_u, rho_d]:
assert rho_ud.shape[0] >= 6
else:
raise ValueError("Unknow nvar {}".format(nvar))
outlen = (math.factorial(nvar+deriv) //
(math.factorial(nvar) * math.factorial(deriv)))
outbuf = numpy.zeros((outlen,ngrids))
_itrf.LIBXC_eval_xc(ctypes.c_int(n),
(ctypes.c_int*n)(*fn_ids),
(ctypes.c_double*n)(*facs),
(ctypes.c_double*n)(*omega),
ctypes.c_int(nspin),
ctypes.c_int(deriv), ctypes.c_int(rho_u.shape[1]),
rho_u.ctypes.data_as(ctypes.c_void_p),
rho_d.ctypes.data_as(ctypes.c_void_p),
outbuf.ctypes.data_as(ctypes.c_void_p))
exc = outbuf[0]
vxc = fxc = kxc = None
if nvar == 1: # LDA
if deriv > 0:
vxc = (outbuf[1], None, None, None)
if deriv > 1:
fxc = (outbuf[2],) + (None,)*9
if deriv > 2:
kxc = (outbuf[3], None, None, None)
elif nvar == 2:
if spin == 0: # GGA
if deriv > 0:
vxc = (outbuf[1], outbuf[2], None, None)
if deriv > 1:
fxc = (outbuf[3], outbuf[4], outbuf[5],) + (None,)*7
if deriv > 2:
kxc = outbuf[6:10]
else: # LDA
if deriv > 0:
vxc = (outbuf[1:3].T, None, None, None)
if deriv > 1:
fxc = (outbuf[3:6].T,) + (None,)*9
if deriv > 2:
kxc = (outbuf[6:10].T, None, None, None)
elif nvar == 5: # GGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, None, None)
if deriv > 1:
fxc = (outbuf[6:9].T, outbuf[9:15].T, outbuf[15:21].T) + (None,)*7
if deriv > 2:
kxc = (outbuf[21:25].T, outbuf[25:34].T, outbuf[34:46].T, outbuf[46:56].T)
elif nvar == 4: # MGGA
if deriv > 0:
vxc = outbuf[1:5]
if deriv > 1:
fxc = outbuf[5:15]
if deriv > 2:
kxc = outbuf[15:19]
elif nvar == 9: # MGGA
if deriv > 0:
vxc = (outbuf[1:3].T, outbuf[3:6].T, outbuf[6:8].T, outbuf[8:10].T)
if deriv > 1:
fxc = (outbuf[10:13].T, outbuf[13:19].T, outbuf[19:25].T,
outbuf[25:28].T, outbuf[28:31].T, outbuf[31:35].T,
outbuf[35:39].T, outbuf[39:43].T, outbuf[43:49].T,
outbuf[49:55].T)
return exc, vxc, fxc, kxc
def define_xc_(ni, description, xctype='LDA', hyb=0, rsh=(0,0,0)):
'''Define XC functional. See also :func:`eval_xc` for the rules of input description.
Args:
ni : an instance of :class:`NumInt`
description : str
A string to describe the linear combination of different XC functionals.
The X and C functional are separated by comma like '.8*LDA+.2*B86,VWN'.
If "HF" was appeared in the string, it stands for the exact exchange.
Kwargs:
xctype : str
'LDA' or 'GGA' or 'MGGA'
hyb : float
hybrid functional coefficient
rsh : a list of three floats
coefficients (omega, alpha, beta) for range-separated hybrid functional.
omega is the exponent factor in attenuated Coulomb operator e^{-omega r_{12}}/r_{12}
alpha is the coefficient for long-range part, hybrid coefficient
can be obtained by alpha + beta
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz')
>>> mf = dft.RKS(mol)
>>> define_xc_(mf._numint, '.2*HF + .08*LDA + .72*B88, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> define_xc_(mf._numint, 'LDA*.08 + .72*B88 + .2*HF, .81*LYP + .19*VWN')
>>> mf.kernel()
-76.3783361189611
>>> def eval_xc(xc_code, rho, *args, **kwargs):
... exc = 0.01 * rho**2
... vrho = 0.01 * 2 * rho
... vxc = (vrho, None, None, None)
... fxc = None # 2nd order functional derivative
... kxc = None # 3rd order functional derivative
... return exc, vxc, fxc, kxc
>>> define_xc_(mf._numint, eval_xc, xctype='LDA')
>>> mf.kernel()
48.8525211046668
'''
if isinstance(description, str):
ni.eval_xc = lambda xc_code, rho, *args, **kwargs: \
eval_xc(description, rho, *args, **kwargs)
ni.hybrid_coeff = lambda *args, **kwargs: hybrid_coeff(description)
ni.rsh_coeff = lambda *args: rsh_coeff(description)
ni._xc_type = lambda *args: xc_type(description)
elif callable(description):
ni.eval_xc = description
ni.hybrid_coeff = lambda *args, **kwargs: hyb
ni.rsh_coeff = lambda *args, **kwargs: rsh
ni._xc_type = lambda *args: xctype
else:
raise ValueError('Unknown description %s' % description)
return ni
def define_xc(ni, description, xctype='LDA', hyb=0, rsh=(0,0,0)):
return define_xc_(copy.copy(ni), description, xctype, hyb, rsh)
define_xc.__doc__ = define_xc_.__doc__
|
sunqm/pyscf
|
pyscf/dft/libxc.py
|
Python
|
apache-2.0
| 115,888
|
[
"DIRAC",
"Gaussian",
"NWChem",
"Octopus",
"PySCF"
] |
441ed032ead085dd9cb61457208cc0953b7e85361f50e6c547b694c72a7b328c
|
# -*- coding: utf-8 -*-
import re
from .xn_parser import XNParserBase, safe_int, get_attribute
from .xn_data import XNResourceBundle
from . import xn_logger
logger = xn_logger.get(__name__, debug=False)
__doc__ = '''
Almost every page directly related to a planet displays the planet's
current resources and energy info at the top:
- overview, buildings, researches, fleet, shipyard, defense, resources, merchant ...
- imperium and galaxy do not have it (they do not show a single planet)
'''
class PlanetEnergyResParser(XNParserBase):
def __init__(self):
super(PlanetEnergyResParser, self).__init__()
# public
self.energy_left = 0
self.energy_total = 0
self.res_current = XNResourceBundle()
self.res_max_silos = XNResourceBundle()
self.res_per_hour = XNResourceBundle()
# internals
self._in_eleft = False
self._in_etot = False
self.clear()
def clear(self):
self.energy_left = 0
self.energy_total = 0
self.res_current = XNResourceBundle()
self.res_max_silos = XNResourceBundle()
self.res_per_hour = XNResourceBundle()
# clear internals
self._in_eleft = False
self._in_etot = False
def handle_starttag(self, tag: str, attrs: list):
super(PlanetEnergyResParser, self).handle_starttag(tag, attrs)
if tag == 'div':
div_title = get_attribute(attrs, 'title')
if div_title is None:
return
            if div_title == 'Энергетический баланс':  # 'Energy balance'
self._in_eleft = True
return
if tag == 'span':
span_title = get_attribute(attrs, 'title')
if span_title is None:
return
            if span_title == 'Выработка энергии':  # 'Energy production'
self._in_etot = True
return
def handle_endtag(self, tag: str):
super(PlanetEnergyResParser, self).handle_endtag(tag)
if tag == 'div':
if self._in_eleft:
self._in_eleft = False
return
if tag == 'span':
if self._in_etot:
self._in_etot = False
return
def handle_data2(self, data: str, tag: str, attrs: list):
super(PlanetEnergyResParser, self).handle_data2(data, tag, attrs)
# if self._in_div_viewport_buildings:
# logger.debug(' handle_data2(tag={0}, data={1}, attrs={2})'.format(tag, data, attrs))
if tag == 'span':
if self._in_eleft:
self.energy_left = safe_int(data)
logger.debug('Got energy left: {0}'.format(self.energy_left))
return
if tag == 'font':
if self._in_etot:
self.energy_total = safe_int(data)
logger.debug('Got energy total: {0}'.format(self.energy_total))
return
if tag == 'div':
div_title = get_attribute(attrs, 'title')
            if div_title == 'Энергетический баланс':  # 'Energy balance'
if data == '0':
self.energy_left = 0
logger.debug('Got energy left = 0 from div (no span)')
if tag == 'script':
script_type = get_attribute(attrs, 'type')
if script_type is None:
return
if script_type == 'text/javascript':
# var ress = new Array(5827, 14614, 4049);
# var max = new Array(180000,180000,180000);
# var production = new Array(0.95805555555556, 0.44055555555556, 0.010277777777778);
if data.startswith('var ress = new Array('):
# logger.debug('[{0}]'.format(data))
m = re.search(r'var ress = new Array\((\d+), (\d+), (\d+)\);', data)
if m is not None:
self.res_current.met = safe_int(m.group(1))
self.res_current.cry = safe_int(m.group(2))
self.res_current.deit = safe_int(m.group(3))
logger.debug('Got planet res_current: {0}'.format(str(self.res_current)))
m = re.search(r'var max = new Array\((\d+),(\d+),(\d+)\);', data)
if m is not None:
self.res_max_silos.met = safe_int(m.group(1))
self.res_max_silos.cry = safe_int(m.group(2))
self.res_max_silos.deit = safe_int(m.group(3))
logger.debug('Got planet res_max: {0}'.format(str(self.res_max_silos)))
m = re.search(r'var production = new Array\(([\d\.]+), ([\d\.]+), ([\d\.]+)\);', data)
if m is not None:
try:
met_per_second = float(m.group(1))
cry_per_second = float(m.group(2))
deit_per_second = float(m.group(3))
self.res_per_hour.met = int(met_per_second * 3600)
self.res_per_hour.cry = int(cry_per_second * 3600)
self.res_per_hour.deit = int(deit_per_second * 3600)
logger.debug('Got planet res per hour: {0}'.format(str(self.res_per_hour)))
except ValueError as e:
logger.warn('Failed to convert to float some of: {0}, {1}, {2}'.format(
m.group(1), m.group(2), m.group(3)))
# <div title="Энергетический баланс"><span class="positive">5</span></div>
# <div title="Энергетический баланс">0</div>
# <span title="Выработка энергии" class="hidden-xs"><font color="#00ff00">12.515</font></span>
# <script type="text/javascript">
# var ress = new Array(2265601, 4207911, 426557);
# var max = new Array(11062500,11062500,6937500);
# var production = new Array(14.3925, 7.2386111111111, 2.4711111111111);
# timeouts['res_count'] = window.setInterval(XNova.updateResources, 1000);
# var serverTime = 1451535635000 - Djs + (timezone + 6) * 1800000;
# </script>
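# Hedged usage sketch (XNParserBase wraps the stdlib HTMLParser interface in
# this project, so feed() is assumed to be the entry point):
#   parser = PlanetEnergyResParser()
#   parser.feed(page_html)  # page_html: source of an overview/buildings/... page
#   print(parser.energy_left, parser.energy_total, parser.res_per_hour)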
|
minlexx/xnovacmd
|
ui/xnova/xn_parser_planet_energy.py
|
Python
|
gpl-2.0
| 6,189
|
[
"Galaxy"
] |
eace020b5a2bb4aee9e9b0ca20bafecd284b9455c1599a01768505336ead5e10
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""A reader for XYZ (Cartesian coordinate) files."""
from cclib.io import filereader
from cclib.parser.data import ccData
from cclib.parser.utils import PeriodicTable
class XYZ(filereader.Reader):
"""A reader for XYZ (Cartesian coordinate) files."""
def __init__(self, source, *args, **kwargs):
super().__init__(source, *args, **kwargs)
self.pt = PeriodicTable()
def parse(self):
super().parse()
self.generate_repr()
return self.data
def generate_repr(self):
"""Convert the raw contents of the source into the internal representation."""
assert hasattr(self, 'filecontents')
it = iter(self.filecontents.splitlines())
# Ordering of lines:
# 1. number of atoms
# 2. comment line
        # 3. line of at least 4 columns: column 1 is the atomic symbol (str),
        #    columns 2-4 are the atomic coordinates (float);
        #    repeated for the number of atoms
# (4. optional blank line)
# repeat for multiple sets of coordinates
all_atomcoords = []
comments = []
while True:
try:
line = next(it)
if line.strip() == '':
line = next(it)
tokens = line.split()
assert len(tokens) >= 1
natom = int(tokens[0])
comments.append(next(it))
lines = []
for _ in range(natom):
line = next(it)
tokens = line.split()
assert len(tokens) >= 4
lines.append(tokens)
assert len(lines) == natom
atomsyms = [line[0] for line in lines]
atomnos = [self.pt.number[atomsym] for atomsym in atomsyms]
atomcoords = [line[1:4] for line in lines]
# Everything beyond the fourth column is ignored.
all_atomcoords.append(atomcoords)
except StopIteration:
break
attributes = {
'natom': natom,
'atomnos': atomnos,
'atomcoords': all_atomcoords,
'metadata': {"comments": comments},
}
self.data = ccData(attributes)
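# Hedged usage sketch (the file name is hypothetical):
#   data = XYZ("geom.xyz").parse()
#   print(data.natom, data.atomnos, data.atomcoords[-1])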
|
cclib/cclib
|
cclib/io/xyzreader.py
|
Python
|
bsd-3-clause
| 2,436
|
[
"cclib"
] |
a2fe1b0707e66fbd69b494700811fb66fabc455794885bb24830c3a3cc8dd281
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import sys
import time
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base import Script
from DIRAC.Core.DISET.MessageClient import MessageClient
Script.parseCommandLine()
def sendPingMsg( msgClient, pingid = 0 ):
"""
Send Ping message to the server
"""
result = msgClient.createMessage( "Ping" )
if not result[ 'OK' ]:
return result
msgObj = result[ 'Value' ]
msgObj.id = pingid
return msgClient.sendMessage( msgObj )
def pongCB( msgObj ):
"""
Callback for the Pong message.
Just send a Ping message incrementing in 1 the id
"""
pongid = msgObj.id
print("RECEIVED PONG %d" % pongid)
return sendPingMsg( msgObj.msgClient, pongid + 1 )
def disconnectedCB( msgClient ):
"""
Reconnect :)
"""
  retryCount = 3  # number of reconnection attempts before giving up
while retryCount:
result = msgClient.connect()
if result[ 'OK' ]:
return result
time.sleep( 1 )
retryCount -= 1
return S_ERROR( "Could not reconnect... :P" )
if __name__ == "__main__":
msgClient = MessageClient( "Framework/PingPong" )
msgClient.subscribeToMessage( 'Pong', pongCB )
msgClient.subscribeToDisconnect( disconnectedCB )
result = msgClient.connect()
if not result[ 'OK' ]:
print("CANNOT CONNECT: %s" % result['Message'])
sys.exit(1)
result = sendPingMsg( msgClient )
if not result[ 'OK' ]:
print("CANNOT SEND PING: %s" % result['Message'])
sys.exit(1)
#Wait 10 secs of pingpongs :P
time.sleep( 10 )
|
yujikato/DIRAC
|
docs/source/DeveloperGuide/Systems/Framework/stableconns/client.py
|
Python
|
gpl-3.0
| 1,533
|
[
"DIRAC"
] |
46fe545cefad46d932681c0253eb72b3eb67599a4579ab729639ffe8111114fd
|
# Copyright 2008-2010 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "tab" (simple tab separated) file format.
You are expected to use this module via the Bio.SeqIO functions.
The "tab" format is an ad-hoc plain text file format where each sequence is
on one (long) line. Each line contains the identifier/description, followed
by a tab, followed by the sequence. For example, consider the following
short FASTA format file:
>ID123456 possible binding site?
CATCNAGATGACACTACGACTACGACTCAGACTAC
>ID123457 random sequence
ACACTACGACTACGACTCAGACTACAAN
Apart from the descriptions, this can be represented in the simple two column
tab separated format as follows:
ID123456(tab)CATCNAGATGACACTACGACTACGACTCAGACTAC
ID123457(tab)ACACTACGACTACGACTCAGACTACAAN
When reading this file, "ID123456" or "ID123457" will be taken as the record's
.id and .name property. There is no other information to record.
Similarly, when writing to this format, Biopython will ONLY record the record's
.id and .seq (and not the description or any other information) as in the
example above.
"""
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.Interfaces import SequentialSequenceWriter
#This is a generator function!
def TabIterator(handle, alphabet = single_letter_alphabet):
"""Iterates over tab separated lines (as SeqRecord objects).
Each line of the file should contain one tab only, dividing the line
into an identifier and the full sequence.
handle - input file
alphabet - optional alphabet
The first field is taken as the record's .id and .name (regardless of
any spaces within the text) and the second field is the sequence.
Any blank lines are ignored.
"""
for line in handle:
try:
title, seq = line.split("\t") #will fail if more than one tab!
        except ValueError:
if line.strip() == "":
#It's a blank line, ignore it
continue
raise ValueError("Each line should have one tab separating the" + \
" title and sequence, this line has %i tabs: %s" \
% (line.count("\t"), repr(line)))
title = title.strip()
seq = seq.strip() #removes the trailing new line
yield SeqRecord(Seq(seq, alphabet),
id=title, name=title,
description="")
class TabWriter(SequentialSequenceWriter):
"""Class to write simple tab separated format files.
Each line consists of "id(tab)sequence" only.
Any description, name or other annotation is not recorded.
"""
def write_record(self, record):
"""Write a single tab line to the file."""
assert self._header_written
assert not self._footer_written
self._record_written = True
title = self.clean(record.id)
seq = self._get_seq_string(record) #Catches sequence being None
assert "\t" not in title
assert "\n" not in title
assert "\r" not in title
assert "\t" not in seq
assert "\n" not in seq
assert "\r" not in seq
self.handle.write("%s\t%s\n" % (title, seq))
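#Typical use is via Bio.SeqIO rather than calling TabIterator directly
#(hedged sketch; the file name is hypothetical):
#    from Bio import SeqIO
#    for record in SeqIO.parse(open("example.tab"), "tab"):
#        print record.id, len(record.seq)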
if __name__ == "__main__":
print "Running quick self test"
from StringIO import StringIO
#This example has a trailing blank line which should be ignored
handle = StringIO("Alpha\tAAAAAAA\nBeta\tCCCCCCC\n\n")
records = list(TabIterator(handle))
assert len(records) == 2
handle = StringIO("Alpha\tAAAAAAA\tExtra\nBeta\tCCCCCCC\n")
try:
records = list(TabIterator(handle))
assert False, "Should have reject this invalid example!"
except ValueError:
#Good!
pass
print "Done"
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/SeqIO/TabIO.py
|
Python
|
gpl-2.0
| 3,968
|
[
"Biopython"
] |
f62971d590f5bfb121ca54a60b2e4ecd600265b472edef87a28ec89ae273ec2b
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.612909
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/epgsimilar.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class epgsimilar(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(epgsimilar, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_35865719 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2eventlist>
''')
for event in VFFSL(SL,"events",True): # generated from line 4, col 2
write(u'''\t<e2event>
\t\t<e2eventid>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event.id",True)) # u'$str($event.id)' on line 6, col 14
if _v is not None: write(_filter(_v, rawExpr=u'$str($event.id)')) # from line 6, col 14.
write(u'''</e2eventid>
\t\t<e2eventstart>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event.begin_timestamp",True)) # u'$str($event.begin_timestamp)' on line 7, col 17
if _v is not None: write(_filter(_v, rawExpr=u'$str($event.begin_timestamp)')) # from line 7, col 17.
write(u'''</e2eventstart>
\t\t<e2eventduration>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event.duration_sec",True)) # u'$str($event.duration_sec)' on line 8, col 20
if _v is not None: write(_filter(_v, rawExpr=u'$str($event.duration_sec)')) # from line 8, col 20.
write(u'''</e2eventduration>
\t\t<e2eventcurrenttime>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event.now_timestamp",True)) # u'$str($event.now_timestamp)' on line 9, col 23
if _v is not None: write(_filter(_v, rawExpr=u'$str($event.now_timestamp)')) # from line 9, col 23.
write(u'''</e2eventcurrenttime>
\t\t<e2eventtitle>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event.title",True)) # u'$str($event.title)' on line 10, col 17
if _v is not None: write(_filter(_v, rawExpr=u'$str($event.title)')) # from line 10, col 17.
write(u'''</e2eventtitle>
\t\t<e2eventdescription>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event.shortdesc",True)) # u'$str($event.shortdesc)' on line 11, col 23
if _v is not None: write(_filter(_v, rawExpr=u'$str($event.shortdesc)')) # from line 11, col 23.
write(u'''</e2eventdescription>
\t\t<e2eventdescriptionextended>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"event.longdesc",True)) # u'$str($event.longdesc)' on line 12, col 31
if _v is not None: write(_filter(_v, rawExpr=u'$str($event.longdesc)')) # from line 12, col 31.
write(u'''</e2eventdescriptionextended>
\t\t<e2eventservicereference>''')
_v = VFFSL(SL,"event.sref",True) # u'$event.sref' on line 13, col 28
if _v is not None: write(_filter(_v, rawExpr=u'$event.sref')) # from line 13, col 28.
write(u'''</e2eventservicereference>
\t\t<e2eventservicename>''')
_v = VFFSL(SL,"event.sname",True) # u'$event.sname' on line 14, col 23
if _v is not None: write(_filter(_v, rawExpr=u'$event.sname')) # from line 14, col 23.
write(u'''</e2eventservicename>
\t</e2event>
''')
write(u'''</e2eventlist>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_35865719
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_epgsimilar= 'respond'
## END CLASS DEFINITION
if not hasattr(epgsimilar, '_initCheetahAttributes'):
templateAPIClass = getattr(epgsimilar, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(epgsimilar)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=epgsimilar()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/web/epgsimilar.py
|
Python
|
gpl-2.0
| 7,547
|
[
"VisIt"
] |
56acbe7ac2ae0b211061a5ebdea4cd100500409b2e3aff64a70be244bcd16a60
|
# -*- coding: utf-8 -*-
#
# © 2015-2016 Krux Digital, Inc.
#
#
# Standard libraries
#
from builtins import range
import string
from collections import Mapping
#
# Third party libraries
#
import boto.utils
from enum import Enum
#
# Internal libraries
#
from krux.logging import get_logger
class Error(Exception):
pass
def get_instance_region():
"""
Query the instance metadata service and return the region this instance is
    placed in. If the metadata service can't be contacted, raise an Error
    instead.
"""
# TODO: XXX This shouldn't get called if we're not on EC2.
zone = boto.utils.get_instance_metadata().get('placement', {}).get('availability-zone', None)
if zone is None:
get_logger('krux_boto').warn('get_instance_region failed to get the local instance region')
raise Error('get_instance_region failed to get the local instance region')
return zone.rstrip(string.ascii_lowercase)
def setup_hosts(hosts, accepted_domains, default):
"""
Loop through hosts to check if the domain matches any in accepted_domains. If not, append default.
This function will return a new list.
:param hosts: A list of hostname strings
:param accepted_domains: A list of accepted domain strings
:param default: A default domain name string to be appended to the hostnames
:return: A list of modified host name strings
"""
new_hostnames = []
for i in range(len(hosts)):
# len(hosts[i]) is there for Python 2.6 string slice support
if any([hosts[i][-len(domain):len(hosts[i])] == domain for domain in accepted_domains]):
new_hostnames.append(hosts[i])
else:
new_hostnames.append(hosts[i] + '.' + default)
return new_hostnames
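# Hedged example of setup_hosts (hostnames and domains are made up):
#   setup_hosts(['web1.krxd.net', 'db1'], ['.krxd.net'], 'krxd.net')
#   # -> ['web1.krxd.net', 'db1.krxd.net']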
# Region codes
class __RegionCode(Mapping):
# GOTCHA: The dictionary is created by matching the values.
# Therefore, when adding a region, make sure the values of the enums match.
# i.e. If we add LA as one of the regions, then Region.LA.value == Code.LAX.value.
class Code(Enum):
"""
Krux uses the largest airport in the region for the codename of AWS region.
This enum is the representation of that.
"""
# Use IATA codes since they sound closer to the colloquial airport names
ASH = 1 # Ashburn, Virginia
PDX = 2 # Portland, Oregon
DUB = 3 # Dublin, Ireland
SIN = 4 # Singapore
BOM = 5 # Mumbai (Bombay), India
SYD = 6 # Sydney, Australia
FRA = 7 # Frankfurt, Germany
NRT = 8 # Tokyo (Narita), Japan
ICN = 9 # Seoul (Incheon), South Korea
GRU = 10 # Sao Paulo (Guarulhos), Brazil
SJC = 11 # San Jose, California
CMH = 12 # Columbus, Ohio
class Region(Enum):
"""
Names of AWS regions as an enum.
"""
us_east_1 = 1
us_west_2 = 2
eu_west_1 = 3
ap_southeast_1 = 4
ap_south_1 = 5
ap_southeast_2 = 6
eu_central_1 = 7
ap_northeast_1 = 8
ap_northeast_2 = 9
sa_east_1 = 10
us_west_1 = 11
us_east_2 = 12
def __str__(self):
return self.name.lower().replace('_', '-')
def __init__(self):
self._wrapped = {}
for code in list(self.Code):
self._wrapped[code] = self.Region(code.value)
for reg in list(self.Region):
self._wrapped[reg] = self.Code(reg.value)
# HACK: Enum does not allow us to override its __getitem__() method.
# Thus, we cannot handle the difference of underscore and dash gracefully.
# However, since __getitem__() is merely a lookup of _member_map_ dictionary, duplicate the elements
# in the private dictionary so that we can handle AWS region <-> RegionCode.Region conversion smoothly.
dash_dict = {}
for name, region in self.Region._member_map_.items():
dash_dict[name.lower().replace('_', '-')] = region
self.Region._member_map_.update(dash_dict)
def __iter__(self):
return iter(self._wrapped)
def __len__(self):
return len(self._wrapped)
def __getitem__(self, key):
if isinstance(key, self.Region) or isinstance(key, self.Code):
return self._wrapped[key]
elif isinstance(key, str):
key = key.replace('-', '_')
code = getattr(self.Code, key.upper(), None)
if code is not None:
return self._wrapped[code]
reg = getattr(self.Region, key.lower(), None)
if reg is not None:
return self._wrapped[reg]
raise KeyError(key)
RegionCode = __RegionCode()
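# Hedged usage sketch of the RegionCode mapping defined above:
#   RegionCode['us-east-1']          # -> Code.ASH
#   RegionCode[RegionCode.Code.PDX]  # -> Region.us_west_2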
|
krux/python-krux-boto
|
krux_boto/util.py
|
Python
|
mit
| 4,773
|
[
"COLUMBUS"
] |
72cfe82176d3474c67f91258d527e23717f53d20dcd472db83991a8d94e0f5db
|
#!/usr/bin/env python
# coding: utf-8
"""
This script identifies the boundaries of a given track using the Spectral
Clustering method published here:
Mcfee, B., & Ellis, D. P. W. (2014). Analyzing Song Structure with Spectral
Clustering. In Proc. of the 15th International Society for Music Information
Retrieval Conference (pp. 405–410). Taipei, Taiwan.
Original code by Brian McFee from:
https://github.com/bmcfee/laplacian_segmentation
"""
__author__ = "Oriol Nieto"
__copyright__ = "Copyright 2014, Music and Audio Research Lab (MARL)"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "oriol@nyu.edu"
import logging
import numpy as np
import msaf
from msaf.algorithms.interface import SegmenterInterface
from . import main
class Segmenter(SegmenterInterface):
def process(self):
"""Main process.
Returns
-------
est_idxs : np.array(N) or list
            Estimated times for the segment boundaries in frame indices.
List if hierarchical segmentation.
est_labels : np.array(N-1) or list
Estimated labels for the segments.
List if hierarchical segmentation.
"""
        # Preprocess to obtain features, times, and input boundary indices
F = self._preprocess()
# Read frame_times
self.hpcp, self.mfcc, self.tonnetz, self.cqt, beats, dur, self.anal = \
msaf.io.get_features(self.audio_file, annot_beats=self.annot_beats,
framesync=self.framesync,
pre_features=self.features)
frame_times = beats
if self.framesync:
frame_times = msaf.utils.get_time_frames(dur, self.anal)
# Brian wants HPCP and MFCC
        # (transposed, because he's that kind of person)
F = (self.hpcp.T, self.mfcc.T)
# Do actual segmentation
est_idxs, est_labels = main.do_segmentation(F, frame_times,
self.config,
self.in_bound_idxs)
if 'numpy' in str(type(est_idxs)):
# Flat output
assert est_idxs[0] == 0 and est_idxs[-1] == F[0].shape[1] - 1
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
else:
# Hierarchical output
for layer in range(len(est_idxs)):
assert est_idxs[layer][0] == 0 and \
est_idxs[layer][-1] == F[0].shape[1] - 1
est_idxs[layer], est_labels[layer] = \
self._postprocess(est_idxs[layer], est_labels[layer])
return est_idxs, est_labels
def processFlat(self):
"""Main process.for flat segmentation.
Returns
-------
est_idxs : np.array(N)
            Estimated times for the segment boundaries in frame indices.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
return self.process()
def processHierarchical(self):
"""Main process.for hierarchial segmentation.
Returns
-------
est_idxs : list
List with np.arrays for each layer of segmentation containing
            the estimated indices for the segment boundaries.
est_labels : list
List with np.arrays containing the labels for each layer of the
hierarchical segmentation.
"""
return self.process()
|
guiquanz/msaf
|
msaf/algorithms/scluster/segmenter.py
|
Python
|
mit
| 3,487
|
[
"Brian"
] |
4d1b786f5c8945c2c063b387ec0e050a8b2c7b95a26e4eb2f75057e02309e3c7
|
import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
pd.set_option('display.width', 1000)
collision_rate = 10000.0 / u.picoseconds
sysname = "ljbox"
system, positions, groups, temperature, timestep = lb_loader.load(sysname)
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, timestep / 4.)
context = lb_loader.build(system, integrator, positions, temperature)
integrator.step(20000)
positions = context.getState(getPositions=True).getPositions()
Neff_cutoff = 500.
integrator = mm.LangevinIntegrator(temperature, 1.0 / u.picoseconds, timestep / 2.)
context = lb_loader.build(system, integrator, positions, temperature)
data, start, g, Neff = lb_loader.converge(context, n_steps=100, Neff_cutoff=Neff_cutoff)
data.to_csv("./data/%s_langevin.csv" % sysname)
integrator = hmc_integrators.GHMCIntegrator(temperature, steps_per_hmc=10, timestep=timestep, collision_rate=collision_rate)
context = lb_loader.build(system, integrator, positions, temperature)
data, start, g, Neff = lb_loader.converge(context, n_steps=10, Neff_cutoff=Neff_cutoff)
data.to_csv("./data/%s_ghmc.csv" % sysname)
integrator = hmc_integrators.GHMCIntegrator(temperature, steps_per_hmc=10, timestep=timestep, collision_rate=1.0 / u.picoseconds)
context = lb_loader.build(system, integrator, positions, temperature)
data, start, g, Neff = lb_loader.converge(context, n_steps=10, Neff_cutoff=Neff_cutoff)
data.to_csv("./data/%s_ghmc1.csv" % sysname)
integrator = hmc_integrators.XHMCIntegrator(temperature, steps_per_hmc=10, timestep=timestep, collision_rate=1.0 / u.picoseconds, extra_chances=5)
context = lb_loader.build(system, integrator, positions, temperature)
data, start, g, Neff = lb_loader.converge(context, n_steps=10, Neff_cutoff=Neff_cutoff)
data.to_csv("./data/%s_xhmc1.csv" % sysname)
integrator = hmc_integrators.GHMCRESPA(temperature, steps_per_hmc=10, timestep=timestep, collision_rate=1.0 / u.picoseconds, groups=groups)
context = lb_loader.build(system, integrator, positions, temperature)
data, start, g, Neff = lb_loader.converge(context, n_steps=10, Neff_cutoff=Neff_cutoff)
data.to_csv("./data/%s_rghmc1.csv" % sysname)
integrator = hmc_integrators.XHMCRESPAIntegrator(temperature, steps_per_hmc=10, timestep=timestep, collision_rate=1.0 / u.picoseconds, extra_chances=5, groups=groups)
context = lb_loader.build(system, integrator, positions, temperature)
data, start, g, Neff = lb_loader.converge(context, n_steps=10, Neff_cutoff=Neff_cutoff)
data.to_csv("./data/%s_rxhmc1.csv" % sysname)
|
kyleabeauchamp/HMCNotes
|
code/correctness/old/test_hmc_correctness_ljbox_converger.py
|
Python
|
gpl-2.0
| 2,651
|
[
"OpenMM"
] |
774fafb2768ebd1edcbfb5d8d3d94e6005505412f601b7785cab18eb1151b533
|
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Hexa : Creation of hexahedra
import hexablock
import os
#---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
# ======================================================= make_grid
def make_grid (doc) :
ori = doc.addVertex ( 0, 0, 0)
vz = doc.addVector ( 0, 0, 1)
vx = doc.addVector ( 1 ,0, 0)
dr = 1
da = 360
dl = 1
nr = 1
na = 6
nl = 1
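    # Hedged reading of makeCylindrical(origin, vx, vz, dr, da, dl, nr, na, nl, fill):
    # dr/da/dl are the radial/angular/axial step sizes, nr/na/nl the element
    # counts in each direction, and the final flag asks for a filled center.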
grid = doc.makeCylindrical (ori, vx,vz, dr,da,dl, nr,na,nl, False)
doc .saveVtk ("transfo1.vtk")
return grid
# ======================================================= test_translation
def test_translation () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
devant = doc.addVector (10, 0, 0)
grid2 = doc.performTranslation (grid, devant)
doc .saveVtk ("transfo2.vtk")
return doc
# ======================================================= test_scale
def test_scale () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
dest = doc.addVertex (15, 0, 0)
grid2 = doc.performScale (grid, dest, 0.5)
doc .saveVtk ("transfo3.vtk")
return doc
# ======================================================= test_sym_point
def test_sym_point () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
orig = doc.addVertex (5, 0, 0)
ier = doc.performSymmetryPoint (grid, orig)
doc .saveVtk ("transfo4.vtk")
return doc
# ======================================================= test_sym_line
def test_sym_line () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
orig = doc.addVertex (5, 0, 0)
dir = doc.addVector (0, 0, 1);
ier = doc.performSymmetryLine (grid, orig, dir)
doc .saveVtk ("transfo5.vtk")
return doc
# ======================================================= test_sym_plan
def test_sym_plan () :
doc = hexablock.addDocument ("default")
grid = make_grid (doc)
orig = doc.addVertex (5, 0, 0)
dir = doc.addVector (1, 0, 0);
ier = doc.performSymmetryPlane (grid, orig, dir)
doc .saveVtk ("transfo6.vtk")
return doc
# ================================================================= Begin
### doc = test_translation ()
doc = test_scale ()
### doc = test_sym_point ()
### doc = test_sym_line ()
### doc = test_sym_plan ()
law = doc.addLaw("Uniform", 4)
for j in range(doc.countPropagation()):
propa = doc.getPropagation(j)
propa.setLaw(law)
mesh_hexas = hexablock.mesh(doc, "maillage:hexas")
|
FedoraScientific/salome-hexablock
|
src/TEST_PY/test_unit/test_perform.py
|
Python
|
lgpl-2.1
| 3,428
|
[
"VTK"
] |
e6988cab2124bd4c1e97fee076f52dcab39ffdffc4bd6404e24f897af16cdd4e
|
"""
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from __future__ import division, absolute_import, print_function
from numpy.lib import add_newdoc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<type 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> fl.next()
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> fl.next()
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* "buffered" enables buffering when required.
* "c_index" causes a C-order index to be tracked.
* "f_index" causes a Fortran-order index to be tracked.
* "multi_index" causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* "common_dtype" causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* "delay_bufalloc" delays allocation of the buffers until
a reset() call is made. Allows "allocate" operands to
be initialized before their values are copied into the buffers.
* "external_loop" causes the `values` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* "grow_inner" allows the `value` array sizes to be made
larger than the buffer size when both "buffered" and
"external_loop" is used.
* "ranged" allows the iterator to be restricted to a sub-range
of the iterindex values.
* "refs_ok" enables iteration of reference types, such as
object arrays.
* "reduce_ok" enables iteration of "readwrite" operands
which are broadcasted, also known as reduction operands.
* "zerosize_ok" allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
"readonly", "readwrite", or "writeonly" must be specified.
* "readonly" indicates the operand will only be read from.
* "readwrite" indicates the operand will be read from and written to.
* "writeonly" indicates the operand will only be written to.
* "no_broadcast" prevents the operand from being broadcasted.
* "contig" forces the operand data to be contiguous.
* "aligned" forces the operand data to be aligned.
* "nbo" forces the operand data to be in native byte order.
* "copy" allows a temporary read-only copy if required.
* "updateifcopy" allows a temporary read-write copy if required.
* "allocate" causes the array to be allocated if it is None
in the `op` parameter.
* "no_subtype" prevents an "allocate" operand from using a subtype.
* "arraymask" indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* 'writemasked' indicates that only elements where the chosen
'arraymask' operand is True will be written to.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of "allocate" operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
        If provided, is a list of ints or None for each operand.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as "newaxis".
itershape : tuple of ints, optional
The desired shape of the iterator. This allows "allocate" operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the "delay_bufalloc" flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the "c_index" or
the "f_index" flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the "multi_index" flag,
and the property `multi_index` can be used to retrieve it.
index
When the "c_index" or "f_index" flag was used, this property
provides access to the index. Raises a ValueError if accessed
and `has_index` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern.
multi_index
When the "multi_index" flag was used, this property
        provides access to the index. Raises a ValueError if accessed
        and `has_multi_index` is False.
ndim : int
The iterator's dimension.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of `operands` at current iteration. Normally, this is a
tuple of array scalars, but if the flag "external_loop" is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the iterator's coordinates or index, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol::
def iter_add_py(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
for (a, b, c) in it:
addop(a, b, out=c)
return it.operands[2]
Here is the same function, but following the C-style pattern::
def iter_add(x, y, out=None):
addop = np.add
it = np.nditer([x, y, out], [],
[['readonly'], ['readonly'], ['writeonly','allocate']])
while not it.finished:
addop(it[0], it[1], out=it[2])
it.iternext()
return it.operands[2]
Here is an example outer product function::
def outer_it(x, y, out=None):
mulop = np.multiply
it = np.nditer([x, y, out], ['external_loop'],
[['readonly'], ['readonly'], ['writeonly', 'allocate']],
op_axes=[range(x.ndim)+[-1]*y.ndim,
[-1]*x.ndim+range(y.ndim),
None])
for (a, b, c) in it:
mulop(a, b, out=c)
return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc::
        def luf(lambdaexpr, *args, **kwargs):
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
nargs = len(args)
op = (kwargs.get('out',None),) + args
it = np.nditer(op, ['buffered','external_loop'],
[['writeonly','allocate','no_broadcast']] +
[['readonly','nbo','aligned']]*nargs,
order=kwargs.get('order','K'),
casting=kwargs.get('casting','safe'),
buffersize=kwargs.get('buffersize',0))
while not it.finished:
                it[0] = lambdaexpr(*it[1:])
it.iternext()
return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> it.next()
(array(0), array(1))
>>> it2 = it.copy()
>>> it2.next()
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
    without returning the result. Used in the C-style do-while pattern.
    For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[ 5., 6., 7.],
[ 6., 7., 8.],
[ 7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
    Current index in broadcasted result.
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
    Tuple of iterators along ``self``'s "components".
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> row.next(), col.next()
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
    Number of dimensions of broadcasted result. For code intended for NumPy
    1.12.0 and later, the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
    >>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> b.next(), b.next(), b.next()
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence. This argument can only be used to 'upcast' the array. For
downcasting, use the .astype(t) method.
copy : bool, optional
        If True (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for `A`, see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""")
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C')
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array
dtype : data-type, optional
Desired output data-type.
order : {'C', 'F'}, optional
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like, zeros, ones
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #random
""")
add_newdoc('numpy.core.multiarray', 'empty_like',
"""
empty_like(a, dtype=None, order='K', subok=True)
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if ``a`` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of ``a`` as closely
as possible.
.. versionadded:: 1.6.0
    subok : bool, optional
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], #random
[ 0, 0, -1073741821]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
""")
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C')
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""")
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='')
A new 1-D array initialized from raw binary or text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
If not provided or, equivalently, the empty string, the data will
be interpreted as binary data; otherwise, as ASCII text with
decimal numbers. Also in this latter case, this argument is
interpreted as the string separating numbers in the data; extra
whitespace between elements is also ignored.
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
>>> np.fromstring('\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iterable : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, np.float)
array([ 0., 1., 4., 9., 16.])
""")
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='')
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str
Open file object or filename.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
    data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import os
>>> fname = os.tmpnam()
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
""")
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset; default: 0.
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt)
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = 'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array(['w', 'o', 'r', 'l', 'd'],
dtype='|S1')
""")
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
concatenate((a1, a2, ...), axis=0)
Join a sequence of arrays along an existing axis.
Parameters
----------
a1, a2, ... : sequence of array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
ma.concatenate : Concatenate function that preserves input masks.
array_split : Split an array into multiple sub-arrays of equal or
near-equal size.
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Notes
-----
When one or more of the arrays to be concatenated is a MaskedArray,
this function will return a MaskedArray object instead of an ndarray,
but the input masks are *not* preserved. In cases where a MaskedArray
is expected as input, use the ma.concatenate function from the masked
array module instead.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
This function will not preserve masking of MaskedArray inputs.
>>> a = np.ma.arange(3)
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
masked_array(data = [0 -- 2],
mask = [False True False],
fill_value = 999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
masked_array(data = [0 1 2 2 3 4],
mask = False,
fill_value = 999999)
>>> np.ma.concatenate([a, b])
masked_array(data = [0 -- 2 2 3 4],
mask = [False True False False False False],
fill_value = 999999)
""")
add_newdoc('numpy.core', 'inner',
"""
inner(a, b)
Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : array_like
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
Notes
-----
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
2
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14, 38, 62],
[ 86, 110, 134]])
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
array([[ 7., 0.],
[ 0., 7.]])
""")
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range <http://docs.python.org/lib/built-in-funcs.html>`_ function,
but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use ``linspace`` for these cases.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified, `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
linspace : Evenly spaced numbers with careful handling of endpoints.
ogrid: Arrays of evenly spaced numbers in N-dimensions.
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""")
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NDARRAY_VERSION number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'where',
"""
where(condition, [x, y])
Return elements, either from `x` or `y`, depending on `condition`.
If only `condition` is given, return ``condition.nonzero()``.
Parameters
----------
condition : array_like, bool
When True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same
shape as `condition`.
Returns
-------
out : ndarray or tuple of ndarrays
If both `x` and `y` are specified, the output array contains
elements of `x` where `condition` is True, and elements from
`y` elsewhere.
If only `condition` is given, return the tuple
``condition.nonzero()``, the indices where `condition` is True.
See Also
--------
nonzero, choose
Notes
-----
If `x` and `y` are given and input arrays are 1-D, `where` is
equivalent to::
[xv if c else yv for (c,xv,yv) in zip(condition,x,y)]
Examples
--------
>>> np.where([[True, False], [True, True]],
... [[1, 2], [3, 4]],
... [[9, 8], [7, 6]])
array([[1, 8],
[3, 4]])
>>> np.where([[0, 1], [1, 0]])
(array([0, 1]), array([1, 0]))
>>> x = np.arange(9.).reshape(3, 3)
>>> np.where( x > 5 )
(array([2, 2, 2]), array([0, 1, 2]))
>>> x[np.where( x > 3.0 )] # Note: result is 1D.
array([ 4., 5., 6., 7., 8.])
>>> np.where(x < 5, x, -1) # Note: broadcasting.
array([[ 0., 1., 2.],
[ 3., 4., -1.],
[-1., -1., -1.]])
Find the indices of elements of `x` that are in `goodvalues`.
>>> goodvalues = [3, 4, 7]
>>> ix = np.in1d(x.ravel(), goodvalues).reshape(x.shape)
>>> ix
array([[False, False, False],
[ True, True, False],
[False, True, False]], dtype=bool)
>>> np.where(ix)
(array([1, 1, 2]), array([0, 1, 1]))
""")
add_newdoc('numpy.core.multiarray', 'lexsort',
"""
lexsort(keys, axis=-1)
Perform an indirect sort using a sequence of keys.
Given multiple sorting keys, which can be interpreted as columns in a
spreadsheet, lexsort returns an array of integer indices that describes
the sort order by multiple columns. The last key in the sequence is used
for the primary sort order, the second-to-last key for the secondary sort
order, and so on. The keys argument must be a sequence of objects that
    can be converted to arrays of the same shape. If a 2D array is provided
    for the keys argument, its rows are interpreted as the sorting keys and
    sorting is according to the last row, second-to-last row, etc.
Parameters
----------
keys : (k, N) array or tuple containing k (N,)-shaped sequences
The `k` different "columns" to be sorted. The last column (or row if
`keys` is a 2D array) is the primary sort key.
axis : int, optional
Axis to be indirectly sorted. By default, sort over the last axis.
Returns
-------
indices : (N,) ndarray of ints
Array of indices that sort the keys along the specified axis.
See Also
--------
argsort : Indirect sort.
ndarray.sort : In-place sort.
sort : Return a sorted copy of an array.
Examples
--------
Sort names: first by surname, then by name.
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
>>> ind = np.lexsort((first_names, surnames))
>>> ind
array([1, 2, 0])
>>> [surnames[i] + ", " + first_names[i] for i in ind]
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
Sort two columns of numbers:
>>> a = [1,5,1,4,3,4,4] # First column
>>> b = [9,4,0,4,0,2,1] # Second column
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
>>> print(ind)
[2 0 4 6 5 3 1]
>>> [(a[i],b[i]) for i in ind]
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
Note that sorting is first according to the elements of ``a``.
Secondary sorting is according to the elements of ``b``.
A normal ``argsort`` would have yielded:
>>> [(a[i],b[i]) for i in np.argsort(a)]
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
Structured arrays are sorted lexically by ``argsort``:
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
... dtype=np.dtype([('x', int), ('y', int)]))
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
array([2, 0, 4, 6, 5, 3, 1])
""")
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
    can_cast(from, totype, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
True if the scalar value can be cast without overflow or truncation
to an integer.
Parameters
----------
from : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
totype : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Returns
-------
out : bool
True if cast can occur according to the casting rule.
Notes
-----
    Starting in NumPy 1.9, the can_cast function returns False in 'safe'
casting mode for integer/float dtype and string dtype if the string dtype
length is not long enough to store the max integer/float value converted
to a string. Previously can_cast in 'safe' mode returned True for
integer/float dtype and a string dtype of any length.
See also
--------
dtype, result_type
Examples
--------
Basic examples
>>> np.can_cast(np.int32, np.int64)
True
>>> np.can_cast(np.float64, np.complex)
True
>>> np.can_cast(np.complex, np.float)
False
>>> np.can_cast('i8', 'f8')
True
>>> np.can_cast('i8', 'f4')
False
>>> np.can_cast('i4', 'S4')
False
Casting scalars
>>> np.can_cast(100, 'i1')
True
>>> np.can_cast(150, 'i1')
False
>>> np.can_cast(150, 'u1')
True
>>> np.can_cast(3.5e100, np.float32)
False
>>> np.can_cast(1000.0, np.float32)
True
Array scalar checks the value, array does not
>>> np.can_cast(np.array(1000.0), np.float32)
True
>>> np.can_cast(np.array([1000.0]), np.float32)
False
Using the casting rules
>>> np.can_cast('i8', 'i8', 'no')
True
>>> np.can_cast('<i8', '>i8', 'no')
False
>>> np.can_cast('<i8', '>i8', 'equiv')
True
>>> np.can_cast('<i4', '>i8', 'equiv')
False
>>> np.can_cast('<i4', '>i8', 'safe')
True
>>> np.can_cast('<i8', '>i4', 'safe')
False
>>> np.can_cast('<i8', '>i4', 'same_kind')
True
>>> np.can_cast('<i8', '>u4', 'same_kind')
False
>>> np.can_cast('<i8', '>u4', 'unsafe')
True
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric and associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
    Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
""")
add_newdoc('numpy.core.multiarray', 'min_scalar_type',
"""
min_scalar_type(a)
For scalar ``a``, returns the data type with the smallest size
and smallest scalar kind which can hold its value. For non-scalar
array ``a``, returns the vector's dtype unmodified.
Floating point values are not demoted to integers,
and complex values are not demoted to floats.
Parameters
----------
a : scalar or array_like
The value whose minimal data type is to be found.
Returns
-------
out : dtype
The minimal data type.
Notes
-----
.. versionadded:: 1.6.0
See Also
--------
result_type, promote_types, dtype, can_cast
Examples
--------
>>> np.min_scalar_type(10)
dtype('uint8')
>>> np.min_scalar_type(-260)
dtype('int16')
>>> np.min_scalar_type(3.1)
dtype('float16')
>>> np.min_scalar_type(1e50)
dtype('float64')
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'result_type',
"""
result_type(*arrays_and_dtypes)
Returns the type that results from applying the NumPy
type promotion rules to the arguments.
Type promotion in NumPy works similarly to the rules in languages
like C++, with some slight differences. When both scalars and
arrays are used, the array's type takes precedence and the actual value
of the scalar is taken into account.
For example, calculating 3*a, where a is an array of 32-bit floats,
intuitively should result in a 32-bit float output. If the 3 is a
32-bit integer, the NumPy rules indicate it can't convert losslessly
into a 32-bit float, so a 64-bit float should be the result type.
By examining the value of the constant, '3', we see that it fits in
an 8-bit integer, which can be cast losslessly into the 32-bit float.
Parameters
----------
arrays_and_dtypes : list of arrays and dtypes
The operands of some operation whose result type is needed.
Returns
-------
out : dtype
The result type.
See also
--------
dtype, promote_types, min_scalar_type, can_cast
Notes
-----
.. versionadded:: 1.6.0
The specific algorithm used is as follows.
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :func:`promote_types`
to produce the return value.
Otherwise, `min_scalar_type` is called on each array, and
the resulting data types are all combined with :func:`promote_types`
to produce the return value.
The set of int values is not a subset of the uint values for types
with the same number of bits, something not reflected in
:func:`min_scalar_type`, but handled as a special case in `result_type`.
Examples
--------
>>> np.result_type(3, np.arange(7, dtype='i1'))
dtype('int8')
>>> np.result_type('i4', 'c8')
dtype('complex128')
>>> np.result_type(3.0, -2)
dtype('float64')
""")
add_newdoc('numpy.core.multiarray', 'newbuffer',
"""
newbuffer(size)
Return a new uninitialized buffer object.
Parameters
----------
size : int
Size in bytes of returned buffer object.
Returns
-------
newbuffer : buffer object
Returned, uninitialized buffer object of `size` bytes.
""")
add_newdoc('numpy.core.multiarray', 'getbuffer',
"""
getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
length size starting at offset.
Default is the entire buffer. A read-write buffer is attempted followed
by a read-only buffer.
Parameters
----------
obj : object
offset : int, optional
size : int, optional
Returns
-------
buffer_obj : buffer
Examples
--------
>>> buf = np.getbuffer(np.ones(5), 1, 3)
>>> len(buf)
3
>>> buf[0]
'\\x00'
>>> buf
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
add_newdoc('numpy.core', 'dot',
"""
dot(a, b, out=None)
Dot product of two arrays.
For 2-D arrays it is equivalent to matrix multiplication, and for 1-D
arrays to inner product of vectors (without complex conjugation). For
N dimensions it is a sum product over the last axis of `a` and
the second-to-last of `b`::
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
matmul : '@' operator as method with out parameter.
Examples
--------
>>> np.dot(3, 4)
12
Neither argument is complex-conjugated:
>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
[2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128
""")
add_newdoc('numpy.core', 'matmul',
"""
matmul(a, b, out=None)
Matrix product of two arrays.
The behavior depends on the arguments in the following way.
- If both arguments are 2-D they are multiplied like conventional
matrices.
- If either argument is N-D, N > 2, it is treated as a stack of
matrices residing in the last two indexes and broadcast accordingly.
- If the first argument is 1-D, it is promoted to a matrix by
prepending a 1 to its dimensions. After matrix multiplication
the prepended 1 is removed.
- If the second argument is 1-D, it is promoted to a matrix by
appending a 1 to its dimensions. After matrix multiplication
the appended 1 is removed.
Multiplication by a scalar is not allowed, use ``*`` instead. Note that
multiplying a stack of matrices with a vector will result in a stack of
vectors, but matmul will not recognize it as such.
``matmul`` differs from ``dot`` in two important ways.
- Multiplication by scalars is not allowed.
- Stacks of matrices are broadcast together as if the matrices
were elements.
.. warning::
This function is preliminary and included in NumPy 1.10.0 for testing
and documentation. Its semantics will not change, but the number and
order of the optional arguments will.
.. versionadded:: 1.10.0
Parameters
----------
a : array_like
First argument.
b : array_like
Second argument.
out : ndarray, optional
Output argument. This must have the exact kind that would be returned
if it was not used. In particular, it must have the right type, must be
C-contiguous, and its dtype must be the dtype that would be returned
for `dot(a,b)`. This is a performance feature. Therefore, if these
conditions are not met, an exception is raised, instead of attempting
to be flexible.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
1-D arrays then a scalar is returned; otherwise an array is
returned. If `out` is given, then it is returned.
Raises
------
ValueError
If the last dimension of `a` is not the same size as
the second-to-last dimension of `b`.
If scalar value is passed.
See Also
--------
vdot : Complex-conjugating dot product.
tensordot : Sum products over arbitrary axes.
einsum : Einstein summation convention.
dot : alternative matrix product with different broadcasting rules.
Notes
-----
The matmul function implements the semantics of the `@` operator introduced
in Python 3.5 following PEP465.
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.matmul(a, b)
array([[4, 1],
[2, 2]])
    For 2-D mixed with 1-D, the result is the usual matrix-vector product.
>>> a = [[1, 0], [0, 1]]
>>> b = [1, 2]
>>> np.matmul(a, b)
array([1, 2])
>>> np.matmul(b, a)
array([1, 2])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2*2*4).reshape((2,2,4))
>>> b = np.arange(2*2*4).reshape((2,4,2))
>>> np.matmul(a,b).shape
(2, 2, 2)
>>> np.matmul(a,b)[0,1,1]
98
>>> sum(a[0,1,:] * b[0,:,1])
98
Vector, vector returns the scalar inner product, but neither argument
is complex-conjugated:
>>> np.matmul([2j, 3j], [2j, 3j])
(-13+0j)
Scalar multiplication raises an error.
>>> np.matmul([1,2], 3)
Traceback (most recent call last):
...
ValueError: Scalar operands are not allowed, use '*' instead
""")
add_newdoc('numpy.core', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe')
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
This is the core C function.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
        is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum, dot, inner, outer, tensordot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Repeated subscripts labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
    ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
    below show corresponding `einsum` calls in both forms.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
""")
add_newdoc('numpy.core', 'vdot',
"""
vdot(a, b)
Return the dot product of two vectors.
The vdot(`a`, `b`) function handles complex numbers differently than
dot(`a`, `b`). If the first argument is complex the complex conjugate
of the first argument is used for the calculation of the dot product.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : array_like
If `a` is complex the complex conjugate is taken before calculation
of the dot product.
b : array_like
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`. Can be an int, float, or
complex depending on the types of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
>>> a = np.array([1+2j,3+4j])
>>> b = np.array([5+6j,7+8j])
>>> np.vdot(a, b)
(70-8j)
>>> np.vdot(b, a)
(70+8j)
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
30
>>> np.vdot(b, a)
30
>>> 1*4 + 4*1 + 5*2 + 6*2
30
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (see `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
to move from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[ -1.13698227e+002, 4.25087011e-303],
[ 2.88528414e-306, 3.27025015e-309]]) #random
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
"""Allow the array to be interpreted as a ctypes object by returning the
data-memory location as an integer.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
* data: A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
byte-order. The memory area may not even be writeable. The array
flags and data-type of this array should be respected when passing this
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as self.__array_interface__['data'][0].
* shape (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to dtype('p') on this
platform. This base-type could be c_int, c_long, or c_longlong
depending on the platform. The c_intp type is defined accordingly in
numpy.ctypeslib. The ctypes array contains the shape of the underlying
array.
* strides (c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
contains the strides information from the underlying array. This strides
information is important for showing how many bytes must be jumped to
get to the next element in the array.
* data_as(obj): Return the data pointer cast to a particular c-types object.
For example, calling self._as_parameter_ is equivalent to
self.data_as(ctypes.c_void_p). Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
self.data_as(ctypes.POINTER(ctypes.c_double)).
* shape_as(obj): Return the shape tuple as an array of some other c-types
type. For example: self.shape_as(ctypes.c_short).
* strides_as(obj): Return the strides tuple as an array of some other
c-types type. For example: self.strides_as(ctypes.c_longlong).
Be careful using the ctypes attribute - especially on temporary
arrays or arrays constructed on the fly. For example, calling
``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
that is invalid because the array created as (a+b) is deallocated
before the next Python statement. You can avoid this problem using
either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
hold a reference to the array until ct is deleted or re-assigned.
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``_as_parameter_`` attribute, which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x
array([[0, 1],
[2, 3]])
>>> x.ctypes.data
30439712
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
<ctypes.LP_c_long object at 0x01F01300>
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
c_long(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
c_longlong(4294967296L)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
>>> x.ctypes.shape_as(ctypes.c_long)
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
>>> x.ctypes.strides_as(ctypes.c_longlong)
<numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x
array([[0, 1],
[2, 3]])
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<type 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
UPDATEIFCOPY (U)
This array is a copy of some other array. When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be changed by
the user, via direct assignment to the attribute or dictionary entry,
or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays or that ``self.strides[0] == self.itemsize``
for Fortran-style contiguous arrays.
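Examples
--------
A brief sketch of the two access styles (the values shown assume a
freshly created, C-ordered array):
>>> a = np.array([1, 2, 3])
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable
True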
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<type 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
Notes
-----
May be used to "reshape" the array, as long as this would not
require a change in the total number of elements.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equivalent to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__(|dtype) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__([order])
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A'}, optional
If order is 'C' (False) then the result is contiguous (default).
If order is 'Fortran' (True) then the result has fortran order.
If order is 'Any' (None) then the result has fortran order
only if the array already is in fortran order.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__() -> Deep copy of array.
Used if copy.deepcopy is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(version, shape, dtype, isfortran, rawdata)
For unpickling.
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isfortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis of `a`.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind='quicksort', order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
Starting in NumPy 1.9, the astype method raises an error if, in 'safe'
casting mode, the string dtype being cast to is not long enough to hold
the maximum value of the integer/float array being cast. Previously the
cast was allowed even if the result was truncated.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([ 1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements.
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
>>> A.byteswap(True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
Arrays of strings are not swapped
>>> A = np.array(['ceg', 'fac'])
>>> A.byteswap()
array(['ceg', 'fac'],
dtype='|S3')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
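Examples
--------
A minimal illustration (see `numpy.clip` for the full behavior):
>>> a = np.arange(5)
>>> a.clip(1, 3)
array([1, 1, 2, 3, 3])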
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:numpy.copy are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[ 2., 2.],
[ 2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[ 8., 8.],
[ 8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str
A string naming the dump file.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads or numpy.loads will convert the string back to an array.
Parameters
----------
None
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([ 1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[ 1.+1.j, 0.+0.j],
[ 0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[ 1., 0.],
[ 0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[ 1., 0.],
[ 0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.item(3)
2
>>> x.item(7)
5
>>> x.item((0, 1))
1
>>> x.item((2, 2))
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument; the last argument is interpreted
as *item*. Then ``a.itemset(*args)`` is equivalent to, but faster
than, ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[3, 1, 7],
[2, 8, 3],
[8, 5, 3]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[3, 1, 7],
[2, 0, 3],
[8, 5, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'shares_memory',
"""
shares_memory(a, b, max_work=None)
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem (maximum number
of candidate solutions to consider). The following special
values are recognized:
max_work=MAY_SHARE_EXACT (default)
The problem is solved exactly. In this case, the function returns
True only if there is an element shared between the arrays.
max_work=MAY_SHARE_BOUNDS
Only the memory bounds of a and b are checked.
Raises
------
numpy.TooHardError
Exceeded max_work.
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
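Overlapping views of the same buffer do share elements:
>>> x = np.arange(10)
>>> np.shares_memory(x, x[::2])
True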
""")
add_newdoc('numpy.core.multiarray', 'may_share_memory',
"""
may_share_memory(a, b, max_work=None)
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
max_work : int, optional
Effort to spend on solving the overlap problem. See
`shares_memory` for details. Default for ``may_share_memory``
is to do a bounds check.
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
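Examples
--------
A small sketch; the concrete element values assume a little-endian
platform:
>>> A = np.array([1, 256], dtype=np.int16)
>>> B = A.newbyteorder()
>>> B.tobytes() == A.tobytes()  # same memory, different interpretation
True
>>> int(B[0]), int(B[1])
(256, 1)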
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
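Examples
--------
A minimal illustration:
>>> x = np.array([[4, 9, 2, 10], [6, 9, 7, 12]])
>>> x.ptp(axis=1)
array([8, 6])
>>> x.ptp(axis=0)
array([2, 0, 5, 2])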
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
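Examples
--------
A minimal illustration (`put` modifies `a` in place):
>>> a = np.arange(5)
>>> a.put([0, 2], [-44, -55])
>>> a
array([-44,   1, -55,   3,   4])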
"""))
add_newdoc('numpy.core.multiarray', 'copyto',
"""
copyto(dst, src, casting='same_kind', where=None)
Copies values from one array to another, broadcasting as necessary.
Raises a TypeError if the `casting` rule is violated, and if
`where` is provided, it selects which elements to copy.
.. versionadded:: 1.7.0
Parameters
----------
dst : ndarray
The array into which values are copied.
src : array_like
The array from which values are copied.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when copying.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `dst`, and selects elements to copy from `src` to `dst`
wherever it contains the value True.
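Examples
--------
A short sketch of masked copying:
>>> dst = np.zeros(4)
>>> np.copyto(dst, [1, 2, 3, 4], where=[True, False, True, False])
>>> dst
array([ 1.,  0.,  3.,  0.])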
""")
add_newdoc('numpy.core.multiarray', 'putmask',
"""
putmask(a, mask, values)
Changes elements of an array based on conditional and input values.
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
If `values` is not the same size as `a` and `mask` then it will repeat.
This gives behavior different from ``a[mask] = values``.
Parameters
----------
a : array_like
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
values : array_like
Values to put into `a` where `mask` is True. If `values` is smaller
than `a` it will be repeated.
See Also
--------
place, put, take, copyto
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> np.putmask(x, x>2, x**2)
>>> x
array([[ 0, 1, 2],
[ 9, 16, 25]])
If `values` is smaller than `a` it is repeated:
>>> x = np.arange(5)
>>> np.putmask(x, x>1, [-33, -44])
>>> x
array([ 0, 1, -33, -44, -33])
""")
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data, or references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that has been referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
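Examples
--------
A minimal illustration of the two `side` options:
>>> a = np.array([1, 2, 3, 4])
>>> a.searchsorted(3)
2
>>> a.searchsorted(3, side='right')
3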
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]])
>>> x
array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
[ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
[ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, and UPDATEIFCOPY, respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The UPDATEIFCOPY flag can never be set to True. The flag WRITEABLE
can only be set to True if the array owns its own memory, or the
ultimate owner of the memory exposes a writeable buffer interface,
or is a string. (The exception for string is made so that unpickling
can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 6 Boolean flags
in use, only three of which can be changed by the user:
UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) this array is a copy of some other array (referenced
by .base). When this array is deallocated, the base array will be
updated with the contents of this array.
All flags can be accessed using their first (upper case) letter as well
as the full name.
Examples
--------
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set UPDATEIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind='quicksort', order=None)
Sort an array, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted array.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in the kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The ordering of all elements in the partitions is undefined.
If provided with a sequence of kth values, all elements indexed by
those values are partitioned into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove single-dimensional entries from the shape of `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
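Examples
--------
A minimal illustration:
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> x.squeeze().shape
(3,)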
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
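Examples
--------
A minimal illustration:
>>> a = np.array([4, 3, 5, 7])
>>> a.take([0, 3])
array([4, 7])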
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str
An open file object, or a string containing a filename.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
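Examples
--------
A quick round-trip sketch (the file paths below are purely illustrative):
>>> x = np.arange(4, dtype=np.int32)
>>> x.tofile('/tmp/x.bin')                    # binary (sep="")
>>> np.fromfile('/tmp/x.bin', dtype=np.int32)
array([0, 1, 2, 3])
>>> x.tofile('/tmp/x.txt', sep=',')           # text
>>> open('/tmp/x.txt').read()
'0,1,2,3'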
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as a (possibly nested) list.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible Python type.
Parameters
----------
none
Returns
-------
y : list
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``.
Examples
--------
>>> a = np.array([1, 2])
>>> a.tolist()
[1, 2]
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
"""))
tobytesdoc = """
a.{name}(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object can be produced in 'C', 'Fortran', or 'Any'
order (the default is 'C' order). 'Any' order means C-order
unless the F_CONTIGUOUS flag in the array is set, in which case it
means 'Fortran' order.
{deprecated}
Parameters
----------
order : {{'C', 'F', None}}, optional
Order of the data for multidimensional arrays:
C, Fortran, or the same as for the original array.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]])
>>> x.tobytes()
b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
"""
add_newdoc('numpy.core.multiarray', 'ndarray',
('tostring', tobytesdoc.format(name='tostring',
deprecated=
'This function is a compatibility '
'alias for tobytes. Despite its '
'name it returns bytes not '
'strings.')))
add_newdoc('numpy.core.multiarray', 'ndarray',
('tobytes', tobytesdoc.format(name='tobytes',
deprecated='.. versionadded:: 1.9.0')))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
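Examples
--------
A minimal illustration:
>>> np.eye(3).trace()
3.0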
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect. (To change between column and
row vectors, first cast the 1-D array into a matrix object.)
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view(dtype=None, type=None)
New view of array with the same data.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16. The
default, None, results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, the
default None results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrixlib.defmatrix.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([ 2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> print(x)
[(1, 20) (3, 4)]
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: new type not compatible with array.
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout)
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array([012, 036, 0144], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['012', '036', '0144'],
dtype='|S4')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[20000, 2, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
...                         invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[10000, 0, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'digitize',
"""
digitize(x, bins, right=False)
Return the indices of the bins to which each value in input array belongs.
Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
`bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
`bins` is monotonically decreasing. If values in `x` are beyond the
bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate. If right
is True, then the right bin is closed so that the index ``i`` is such
that ``bins[i-1] < x <= bins[i]`` or ``bins[i-1] >= x > bins[i]`` if `bins`
is monotonically increasing or decreasing, respectively.
Parameters
----------
x : array_like
Input array to be binned. Prior to NumPy 1.10.0, this array had to
be 1-dimensional, but can now have any shape.
bins : array_like
Array of bins. It has to be 1-dimensional and monotonic.
right : bool, optional
Indicating whether the intervals include the right or the left bin
edge. Default behavior is (right==False) indicating that the interval
does not include the right edge. The left bin end is closed in this
case, i.e., ``bins[i-1] <= x < bins[i]`` is the default behavior for
monotonically increasing bins.
Returns
-------
out : ndarray of ints
Output array of indices, of same shape as `x`.
Raises
------
ValueError
If `bins` is not monotonic.
TypeError
If the type of the input is complex.
See Also
--------
bincount, histogram, unique
Notes
-----
If values in `x` are such that they fall outside the bin range,
attempting to index `bins` with the indices that `digitize` returns
will result in an IndexError.
.. versionadded:: 1.10.0
`np.digitize` is implemented in terms of `np.searchsorted`. This means
that a binary search is used to bin the values, which scales much better
for large numbers of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = np.digitize(x, bins)
>>> inds
array([1, 4, 3, 2])
>>> for n in range(x.size):
... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
2.5 <= 3.0 < 4.0
1.0 <= 1.6 < 2.5
>>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
>>> bins = np.array([0, 5, 10, 15, 20])
>>> np.digitize(x,bins,right=True)
array([1, 2, 3, 4, 4])
>>> np.digitize(x,bins,right=False)
array([1, 3, 3, 4, 5])
""")
add_newdoc('numpy.core.multiarray', 'bincount',
"""
bincount(x, weights=None, minlength=None)
Count number of occurrences of each value in array of non-negative ints.
The number of bins (of size 1) is one larger than the largest value in
`x`. If `minlength` is specified, there will be at least this number
of bins in the output array (though it will be longer if necessary,
depending on the contents of `x`).
Each bin gives the number of occurrences of its index value in `x`.
If `weights` is specified the input array is weighted by it, i.e. if a
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
of ``out[n] += 1``.
Parameters
----------
x : array_like, 1 dimension, nonnegative ints
Input array.
weights : array_like, optional
Weights, array of the same shape as `x`.
minlength : int, optional
A minimum number of bins for the output array.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray of ints
The result of binning the input array.
The length of `out` is equal to ``np.amax(x)+1``.
Raises
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
values, or if `minlength` is non-positive.
TypeError
If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
>>> np.bincount(np.arange(5, dtype=np.float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
A possible use of ``bincount`` is to perform sums over
variable-size chunks of an array, using the ``weights`` keyword.
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
""")
add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
"""
ravel_multi_index(multi_index, dims, mode='raise', order='C')
Converts a tuple of index arrays into an array of flat
indices, applying boundary modes to the multi-index.
Parameters
----------
multi_index : tuple of array_like
A tuple of integer arrays, one array for each dimension.
dims : tuple of ints
The shape of array into which the indices from ``multi_index`` apply.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices are handled. Can specify
either one mode or a tuple of modes, one mode per index.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
In 'clip' mode, a negative index which would normally
wrap will clip to 0 instead.
order : {'C', 'F'}, optional
Determines whether the multi-index should be viewed as
indexing in row-major (C-style) or column-major
(Fortran-style) order.
Returns
-------
raveled_indices : ndarray
An array of indices into the flattened version of an array
of dimensions ``dims``.
See Also
--------
unravel_index
Notes
-----
.. versionadded:: 1.6.0
Examples
--------
>>> arr = np.array([[3,6,6],[4,5,1]])
>>> np.ravel_multi_index(arr, (7,6))
array([22, 41, 37])
>>> np.ravel_multi_index(arr, (7,6), order='F')
array([31, 41, 13])
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
array([22, 23, 19])
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
array([12, 13, 13])
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
1621
""")
add_newdoc('numpy.core.multiarray', 'unravel_index',
"""
unravel_index(indices, dims, order='C')
Converts a flat index or array of flat indices into a tuple
of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened
version of an array of dimensions ``dims``. Before version 1.6.0,
this function accepted just one index value.
dims : tuple of ints
The shape of the array to use for unraveling ``indices``.
order : {'C', 'F'}, optional
Determines whether the indices should be viewed as indexing in
row-major (C-style) or column-major (Fortran-style) order.
.. versionadded:: 1.6.0
Returns
-------
unraveled_coords : tuple of ndarray
Each array in the tuple has the same shape as the ``indices``
array.
See Also
--------
ravel_multi_index
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
(array([3, 6, 6]), array([4, 5, 1]))
>>> np.unravel_index(1621, (6,7,8,9))
(3, 1, 4, 1)
""")
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring raise a RuntimeError
If this routine does not know how to add a docstring to the object
raise a TypeError
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', 'packbits',
"""
packbits(myarray, axis=None)
Packs the elements of a binary-valued array into bits in a uint8 array.
The result is padded to full bytes by inserting zero bits at the end.
Parameters
----------
myarray : array_like
An integer type array whose elements should be packed to bits.
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
Returns
-------
packed : ndarray
Array of type uint8 whose elements represent bits corresponding to the
logical (0 or nonzero) value of the input elements. The shape of
`packed` has the same number of dimensions as the input (unless `axis`
is None, in which case the output is 1-D).
See Also
--------
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
array.
Examples
--------
>>> a = np.array([[[1,0,1],
... [0,1,0]],
... [[1,1,0],
... [0,0,1]]])
>>> b = np.packbits(a, axis=-1)
>>> b
array([[[160],[64]],[[192],[32]]], dtype=uint8)
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
and 32 = 0010 0000.
""")
add_newdoc('numpy.core.multiarray', 'unpackbits',
"""
unpackbits(myarray, axis=None)
Unpacks elements of a uint8 array into a binary-valued output array.
Each element of `myarray` represents a bit-field that should be unpacked
into a binary-valued output array. The shape of the output array is either
1-D (if `axis` is None) or the same shape as the input array with unpacking
done along the axis specified.
Parameters
----------
myarray : ndarray, uint8 type
Input array.
axis : int, optional
Unpacks along this axis.
Returns
-------
unpacked : ndarray, uint8 type
The elements are binary-valued (0 or 1).
See Also
--------
packbits : Packs the elements of a binary-valued array into bits in a uint8
array.
Examples
--------
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
>>> a
array([[ 2],
[ 7],
[23]], dtype=uint8)
>>> b = np.unpackbits(a, axis=1)
>>> b
array([[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use np.info(). For
example, np.info(np.sin). Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the "ufuncs.rst"
file in the NumPy reference guide.
Unary ufuncs:
=============
op(X, out=None)
Apply op to X elementwise
Parameters
----------
X : array_like
Input array.
out : array_like
An array to store the output. Must be the same shape as `X`.
Returns
-------
r : array_like
`r` will have the same shape as `X`; if out is provided, `r`
will be equal to out.
Binary ufuncs:
==============
op(X, Y, out=None)
Apply `op` to `X` and `Y` elementwise. May "broadcast" to make
the shapes of `X` and `Y` congruent.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
X : array_like
First input array.
Y : array_like
Second input array.
out : array_like
An array to store the output. Must be the same shape as the
output would have.
Returns
-------
r : array_like
The return value; if out is provided, `r` will be equal to out.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(a, axis=0, dtype=None, out=None, keepdims=False)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
a : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is `None`, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided, a
freshly-allocated array is returned.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
.. versionadded:: 1.7.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None, keepdims=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
keepdims : bool
Has no effect. Deprecated, and will be removed in a future release.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[ 1., 0.],
[ 0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[ 1., 0.],
[ 1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[ 1., 0.],
[ 1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[ 1., 1.],
[ 0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(a, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = a.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``a[indices[i]]``.
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `a` (this happens if ``len(indices) > a.shape[axis]``).
Parameters
----------
a : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, optional
A location into which the result is stored. If not provided a
freshly-allocated array is returned.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as
``ufunc.reduceat(a, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``.
Don't be fooled by this attribute's name: `reduceat(a)` is not
necessarily smaller than `a`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[ 12., 15., 18., 21.],
[ 12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[ 2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
"""
outer(A, B, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
`a[indices] += b`, except that results are accumulated for elements that
are indexed more than once. For example, `a[[0,0]] += 1` will only
increment the first element once because of buffering, whereas
`add.at(a, [0,0], 1)` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> print(a)
[-1 -2  3  4]
::
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> print(a)
[2 3 5 4]
::
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> print(a)
[2 4 3 4]
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(obj, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
obj
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint), ('f2', np.int32)])
dtype([('f1', '<u4'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', '|S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', '|S1'), ('age', '|u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', '|S25'), ('age', '|u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endian is not relevant for 8 bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
PEP3118 interface description of the data-type.
The format is that required by the 'descr' key in the
PEP3118 `__array_interface__` attribute.
Warning: This attribute exists specifically for PEP3118 compliance, and
is not a datatype description compatible with `np.dtype`.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
The code does a case-insensitive check on the first letter of
`new_order` for these alternatives. For example, any of '>'
or 'B' or 'b' or 'brian' are valid to specify big-endian.
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False], dtype='bool')
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'is_busday',
"""
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
Calculates which of the given dates are valid days, and which are not.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of bool, optional
If provided, this array is filled with the result.
Returns
-------
out : array of bool
An array with the same shape as ``dates``, containing True for
each valid day, and False for each invalid day.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # The weekdays are Friday, Saturday, and Monday
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
array([False, False, True], dtype='bool')
""")
add_newdoc('numpy.core.multiarray', 'busday_offset',
"""
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
First adjusts the date to fall on a valid day according to
the ``roll`` rule, then applies offsets to the given dates
counted in valid days.
.. versionadded:: 1.7.0
Parameters
----------
dates : array_like of datetime64[D]
The array of dates to process.
offsets : array_like of int
The array of offsets, which is broadcast with ``dates``.
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
How to treat dates that do not fall on a valid day. The default
is 'raise'.
* 'raise' means to raise an exception for an invalid day.
* 'nat' means to return a NaT (not-a-time) for an invalid day.
* 'forward' and 'following' mean to take the first valid day
later in time.
* 'backward' and 'preceding' mean to take the first valid day
earlier in time.
* 'modifiedfollowing' means to take the first valid day
later in time unless it is across a Month boundary, in which
case to take the first valid day earlier in time.
* 'modifiedpreceding' means to take the first valid day
earlier in time unless it is across a Month boundary, in which
case to take the first valid day later in time.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of datetime64[D], optional
If provided, this array is filled with the result.
Returns
-------
out : array of datetime64[D]
An array with a shape from broadcasting ``dates`` and ``offsets``
together, containing the dates with offsets applied.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_count : Counts how many valid days are in a half-open date range.
Examples
--------
>>> # First business day in October 2011 (not accounting for holidays)
... np.busday_offset('2011-10', 0, roll='forward')
numpy.datetime64('2011-10-03','D')
>>> # Last business day in February 2012 (not accounting for holidays)
... np.busday_offset('2012-03', -1, roll='forward')
numpy.datetime64('2012-02-29','D')
>>> # Third Wednesday in January 2011
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
numpy.datetime64('2011-01-19','D')
>>> # 2012 Mother's Day in Canada and the U.S.
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
numpy.datetime64('2012-05-13','D')
>>> # First business day on or after a date
... np.busday_offset('2011-03-20', 0, roll='forward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 0, roll='forward')
numpy.datetime64('2011-03-22','D')
>>> # First business day after a date
... np.busday_offset('2011-03-20', 1, roll='backward')
numpy.datetime64('2011-03-21','D')
>>> np.busday_offset('2011-03-22', 1, roll='backward')
numpy.datetime64('2011-03-23','D')
""")
add_newdoc('numpy.core.multiarray', 'busday_count',
"""
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
Counts the number of valid days between `begindates` and
`enddates`, not including the day of `enddates`.
If ``enddates`` specifies a date value that is earlier than the
corresponding ``begindates`` date value, the count will be negative.
.. versionadded:: 1.7.0
Parameters
----------
begindates : array_like of datetime64[D]
The array of the first dates for counting.
enddates : array_like of datetime64[D]
The array of the end dates for counting, which are excluded
from the count themselves.
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates. They may be
specified in any order, and NaT (not-a-time) dates are ignored.
This list is saved in a normalized form that is suited for
fast calculations of valid days.
busdaycal : busdaycalendar, optional
A `busdaycalendar` object which specifies the valid days. If this
parameter is provided, neither weekmask nor holidays may be
provided.
out : array of int, optional
If provided, this array is filled with the result.
Returns
-------
out : array of int
An array with a shape from broadcasting ``begindates`` and ``enddates``
together, containing the number of valid days between
the begin and end dates.
See Also
--------
busdaycalendar: An object that specifies a custom set of valid days.
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
Examples
--------
>>> # Number of weekdays in January 2011
... np.busday_count('2011-01', '2011-02')
21
>>> # Number of weekdays in 2011
... np.busday_count('2011', '2012')
260
>>> # Number of Saturdays in 2011
... np.busday_count('2011', '2012', weekmask='Sat')
53
""")
##############################################################################
#
# nd_grid instances
#
##############################################################################
add_newdoc('numpy.lib.index_tricks', 'mgrid',
"""
`nd_grid` instance which returns a dense multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns a dense
(or fleshed out) mesh-grid when indexed, so that each returned argument
has the same shape. The dimensions and number of the output arrays are
equal to the number of indexing dimensions. If the step length is not a
complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` all of the same dimensions
See Also
--------
numpy.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
ogrid : like mgrid but returns open (not fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]],
[[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
>>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
""")
add_newdoc('numpy.lib.index_tricks', 'ogrid',
"""
`nd_grid` instance which returns an open multi-dimensional "meshgrid".
An instance of `numpy.lib.index_tricks.nd_grid` which returns an open
(i.e. not fleshed out) mesh-grid when indexed, so that only one dimension
of each returned array is greater than 1. The dimension and number of the
output arrays are equal to the number of indexing dimensions. If the step
length is not a complex number, then the stop is not inclusive.
However, if the step length is a **complex number** (e.g. 5j), then
the integer part of its magnitude is interpreted as specifying the
number of points to create between the start and stop values, where
the stop value **is inclusive**.
Returns
-------
mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
See Also
--------
np.lib.index_tricks.nd_grid : class of `ogrid` and `mgrid` objects
mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
r_ : array concatenator
Examples
--------
>>> from numpy import ogrid
>>> ogrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
>>> ogrid[0:5,0:5]
[array([[0],
[1],
[2],
[3],
[4]]), array([[0, 1, 2, 3, 4]])]
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('base',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic', ('all',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('any',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('item',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('max',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('min',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S')
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'L'} - little endian
* {'>', 'B'} - big endian
* {'=', 'N'} - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order. The code does a case-insensitive check on the first
letter of `new_order` for the alternatives above. For example,
any of 'B', 'b', or 'biggish' is valid to specify big-endian.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('put',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('round',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('std',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('take',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('var',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
Not implemented (virtual attribute)
Class generic exists solely to derive numpy scalars from, and possesses,
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
See Also
--------
The corresponding attribute of the derived class of interest.
"""))
##############################################################################
#
# Documentation for other scalar classes
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'bool_',
"""NumPy's Boolean type. Character code: ``?``. Alias: bool8""")
add_newdoc('numpy.core.numerictypes', 'complex64',
"""
Complex number type composed of two 32-bit floats. Character code: 'F'.
""")
add_newdoc('numpy.core.numerictypes', 'complex128',
"""
Complex number type composed of two 64-bit floats. Character code: 'D'.
Python complex compatible.
""")
add_newdoc('numpy.core.numerictypes', 'complex256',
"""
Complex number type composed of two 128-bit floats. Character code: 'G'.
""")
add_newdoc('numpy.core.numerictypes', 'float32',
"""
32-bit floating-point number. Character code: 'f'. C float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float64',
"""
64-bit floating-point number. Character code: 'd'. Python float compatible.
""")
add_newdoc('numpy.core.numerictypes', 'float96',
"""
96-bit extended-precision floating-point number. Character code: 'g'.
Available only on platforms where the C long double is 96 bits wide.
""")
add_newdoc('numpy.core.numerictypes', 'float128',
"""
128-bit floating-point number. Character code: 'g'. C long double
compatible.
""")
add_newdoc('numpy.core.numerictypes', 'int8',
"""8-bit integer. Character code ``b``. C char compatible.""")
add_newdoc('numpy.core.numerictypes', 'int16',
"""16-bit integer. Character code ``h``. C short compatible.""")
add_newdoc('numpy.core.numerictypes', 'int32',
"""32-bit integer. Character code 'i'. C int compatible.""")
add_newdoc('numpy.core.numerictypes', 'int64',
"""64-bit integer. Character code 'l'. Python int compatible.""")
add_newdoc('numpy.core.numerictypes', 'object_',
"""Any Python object. Character code: 'O'.""")
|
maniteja123/numpy
|
numpy/add_newdocs.py
|
Python
|
bsd-3-clause
| 225,018
|
[
"Brian"
] |
bab90c311959076ce9230f7ed13270b94500816c0ec42830c5d306e5d8a18cf1
|