| text (stringlengths 12-1.05M) | repo_name (stringlengths 5-86) | path (stringlengths 4-191) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 12-1.05M) | keyword (listlengths 1-23) | text_hash (stringlengths 64) |
|---|---|---|---|---|---|---|---|
import json
from django.conf import settings
from django.test import TestCase
from django.utils.importlib import import_module
from funfactory.urlresolvers import reverse
import mock
from nose.tools import ok_, eq_
from airmozilla.authentication.browserid_mock import mock_browserid
from airmozilla.base import mozillians
from airmozilla.base.tests.testbase import Response
from airmozilla.main.models import UserProfile
VOUCHED_FOR = """
{
"meta": {
"previous": null,
"total_count": 1,
"offset": 0,
"limit": 20,
"next": null
},
"objects": [
{
"website": "",
"bio": "",
"resource_uri": "/api/v1/users/2429/",
"last_updated": "2012-11-06T14:41:47",
"groups": [
"ugly tuna"
],
"city": "Casino",
"skills": [],
"country": "Albania",
"region": "Bush",
"id": "2429",
"languages": [],
"allows_mozilla_sites": true,
"photo": "http://www.gravatar.com/avatar/0409b497734934400822bb33...",
"is_vouched": true,
"email": "peterbe@gmail.com",
"ircname": "",
"allows_community_sites": true,
"full_name": "Peter Bengtsson"
}
]
}
"""
NOT_VOUCHED_FOR = """
{
"meta": {
"previous": null,
"total_count": 1,
"offset": 0,
"limit": 20,
"next": null
},
"objects": [
{
"website": "http://www.peterbe.com/",
"bio": "",
"resource_uri": "/api/v1/users/2430/",
"last_updated": "2012-11-06T15:37:35",
"groups": [
"no beard"
],
"city": "<style>p{font-style:italic}</style>",
"skills": [],
"country": "Heard Island and McDonald Islands",
"region": "Drunk",
"id": "2430",
"languages": [],
"allows_mozilla_sites": true,
"photo": "http://www.gravatar.com/avatar/23c6d359b6f7af3d3f91ca9e17...",
"is_vouched": false,
"email": "tmickel@mit.edu",
"ircname": "",
"allows_community_sites": true,
"full_name": null
}
]
}
"""
class TestViews(TestCase):
def setUp(self):
super(TestViews, self).setUp()
engine = import_module(settings.SESSION_ENGINE)
store = engine.SessionStore()
store.save() # we need to make load() work, or the cookie is worthless
self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key
def shortDescription(self):
        # Stop nose from using the test docstring; use the test method name instead.
pass
def get_messages(self):
return self.client.session['_messages']
def _login_attempt(self, email, assertion='fakeassertion123', next=None):
if not next:
next = '/'
with mock_browserid(email):
post_data = {
'assertion': assertion,
'next': next
}
return self.client.post(
'/browserid/login/',
post_data
)
def test_invalid(self):
"""Bad BrowserID form (i.e. no assertion) -> failure."""
response = self._login_attempt(None, None)
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)
# self.assertRedirects(
# response,
# settings.LOGIN_REDIRECT_URL_FAILURE + '?bid_login_failed=1'
# )
def test_bad_verification(self):
"""Bad verification -> failure."""
response = self._login_attempt(None)
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)
# self.assertRedirects(
# response,
# settings.LOGIN_REDIRECT_URL_FAILURE + '?bid_login_failed=1'
# )
@mock.patch('requests.get')
def test_nonmozilla(self, rget):
"""Non-Mozilla email -> failure."""
def mocked_get(url, **options):
if 'tmickel' in url:
return Response(NOT_VOUCHED_FOR)
if 'peterbe' in url:
return Response(VOUCHED_FOR)
raise NotImplementedError(url)
rget.side_effect = mocked_get
response = self._login_attempt('tmickel@mit.edu')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)
# self.assertRedirects(
# response,
# settings.LOGIN_REDIRECT_URL_FAILURE + '?bid_login_failed=1'
# )
# now with a non-mozillian that is vouched for
response = self._login_attempt('peterbe@gmail.com')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
# self.assertRedirects(response,
# settings.LOGIN_REDIRECT_URL)
@mock.patch('requests.get')
def test_nonmozilla_vouched_for_second_time(self, rget):
assert not UserProfile.objects.all()
def mocked_get(url, **options):
return Response(VOUCHED_FOR)
rget.side_effect = mocked_get
# now with a non-mozillian that is vouched for
response = self._login_attempt('peterbe@gmail.com')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
# self.assertRedirects(response,
# settings.LOGIN_REDIRECT_URL)
# should be logged in
response = self.client.get('/')
eq_(response.status_code, 200)
ok_('Sign in' not in response.content)
ok_('Sign out' in response.content)
profile, = UserProfile.objects.all()
ok_(profile.contributor)
# sign out
response = self.client.get(reverse('browserid.logout'))
eq_(response.status_code, 405)
response = self.client.post(reverse('browserid.logout'))
eq_(response.status_code, 200)
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
# should be logged out
response = self.client.get('/')
eq_(response.status_code, 200)
ok_('Sign in' in response.content)
ok_('Sign out' not in response.content)
# sign in again
response = self._login_attempt('peterbe@gmail.com')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
# self.assertRedirects(response,
# settings.LOGIN_REDIRECT_URL)
# should not have created another one
eq_(UserProfile.objects.all().count(), 1)
# sign out again
response = self.client.post(reverse('browserid.logout'))
eq_(response.status_code, 200)
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
# pretend this is lost
profile.contributor = False
profile.save()
response = self._login_attempt('peterbe@gmail.com')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
# self.assertRedirects(response,
# settings.LOGIN_REDIRECT_URL)
# should not have created another one
eq_(UserProfile.objects.filter(contributor=True).count(), 1)
def test_mozilla(self):
"""Mozilla email -> success."""
# Try the first allowed domain
response = self._login_attempt('tmickel@' + settings.ALLOWED_BID[0])
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
# self.assertRedirects(response,
# settings.LOGIN_REDIRECT_URL)
@mock.patch('requests.get')
def test_was_contributor_now_mozilla_bid(self, rget):
"""Suppose a user *was* a contributor but now her domain name
is one of the allowed ones, it should undo that contributor status
"""
assert not UserProfile.objects.all()
def mocked_get(url, **options):
return Response(VOUCHED_FOR)
rget.side_effect = mocked_get
response = self._login_attempt('peterbe@gmail.com')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
response = self.client.get('/')
eq_(response.status_code, 200)
ok_('Sign in' not in response.content)
ok_('Sign out' in response.content)
profile = UserProfile.objects.get(user__email='peterbe@gmail.com')
ok_(profile.contributor)
self.client.logout()
response = self.client.get('/')
eq_(response.status_code, 200)
ok_('Sign in' in response.content)
ok_('Sign out' not in response.content)
with self.settings(ALLOWED_BID=settings.ALLOWED_BID + ('gmail.com',)):
response = self._login_attempt('peterbe@gmail.com')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL)
profile = UserProfile.objects.get(user__email='peterbe@gmail.com')
ok_(not profile.contributor) # fixed!
@mock.patch('airmozilla.authentication.views.logger')
@mock.patch('requests.get')
def test_nonmozilla_mozillians_unhappy(self, rget, rlogger):
assert not UserProfile.objects.all()
def mocked_get(url, **options):
raise mozillians.BadStatusCodeError('crap!')
rget.side_effect = mocked_get
# now with a non-mozillian that is vouched for
response = self._login_attempt('peterbe@gmail.com')
eq_(response['content-type'], 'application/json')
redirect = json.loads(response.content)['redirect']
eq_(redirect, settings.LOGIN_REDIRECT_URL_FAILURE)
# self.assertRedirects(
# response,
# settings.LOGIN_REDIRECT_URL_FAILURE + '?bid_login_failed=1'
# )
eq_(rlogger.error.call_count, 1)
| chirilo/airmozilla | airmozilla/authentication/tests/test_views.py | Python | bsd-3-clause | 10,650 | ["CASINO"] | 309d73bea5791f11a2b8166e18ea93c0cb84c064e6ea176ddbf37ab799bde94c |
#!/usr/bin/env python
from pymatgen.analysis.chemenv.utils.scripts_utils import compute_environments, welcome, thankyou
from pymatgen.analysis.chemenv.utils.chemenv_config import ChemEnvConfig
from argparse import ArgumentParser
import logging
__author__ = 'waroquiers'
def main():
m_description = 'Welcome to the Chemical Environment Package.'
parser = ArgumentParser(description=m_description)
setup_help = 'Used to setup the configuration of the package '
setup_help += '(MaterialsProject access, ICSD database access, package options, ...)'
parser.add_argument('-s', '--setup', help=setup_help, action='store_true')
parser.add_argument('-m', '--message-level', help='Message level (DEBUG, INFO, WARNING, ERROR or CRITICAL - '
'default : WARNING)',
default='WARNING')
args = parser.parse_args()
if args.setup:
chemenv_config = ChemEnvConfig.auto_load()
chemenv_config.setup()
print('\n Setup completed')
else:
chemenv_config = ChemEnvConfig.auto_load()
welcome(chemenv_config)
logging.basicConfig(format='%(levelname)s:%(module)s:%(funcName)s:%(message)s', level=args.message_level)
compute_environments(chemenv_config)
thankyou()
if __name__ == '__main__':
main()
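
# Example invocations (illustrative; the flags are defined by the parser above):
#   python get_environment.py --setup    # configure MaterialsProject/ICSD access and options
#   python get_environment.py -m INFO    # compute environments with INFO-level logging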
| fraricci/pymatgen | pymatgen/cli/get_environment.py | Python | mit | 1,341 | ["pymatgen"] | e72d5f7d965d7646bf5bbe14b0c4f60d3a0baac34039673357bca32fa15d3884 |
# coding: utf-8
from __future__ import division, unicode_literals
"""
Some reimplementation of Henkelman's Transition State Analysis utilities,
which are originally in Perl. Additional features beyond those offered by
Henkelman's utilities will be added.
This allows usage and customization in Python.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '6/1/15'
import os
import glob
import numpy as np
from scipy.interpolate import PiecewisePolynomial
from pymatgen.util.plotting_utils import get_publication_quality_plot
from pymatgen.io.vaspio import Poscar, Outcar
class NEBAnalysis(object):
"""
An NEBAnalysis class.
"""
def __init__(self, outcars, structures, interpolation_order=3):
"""
Initializes an NEBAnalysis from Outcar and Structure objects. Use
the static constructors, e.g., :class:`from_dir` instead if you
prefer to have these automatically generated from a directory of NEB
calculations.
Args:
outcars ([Outcar]): List of Outcar objects. Note that these have
to be ordered from start to end along reaction coordinates.
structures ([Structure]): List of Structures along reaction
                coordinate. Must be the same length as outcars.
interpolation_order (int): Order of polynomial to use to
interpolate between images. Same format as order parameter in
                scipy.interpolate.PiecewisePolynomial.
"""
if len(outcars) != len(structures):
raise ValueError("# of Outcars must be same as # of Structures")
# Calculate cumulative root mean square distance between structures,
# which serves as the reaction coordinate. Note that these are
# calculated from the final relaxed structures as the coordinates may
# have changed from the initial interpolation.
r = [0]
prev = structures[0]
for st in structures[1:]:
dists = np.array([s2.distance(s1) for s1, s2 in zip(prev, st)])
r.append(np.sqrt(np.sum(dists ** 2)))
prev = st
r = np.cumsum(r)
energies = []
forces = []
for i, o in enumerate(outcars):
o.read_neb()
energies.append(o.data["energy"])
if i in [0, len(outcars) - 1]:
forces.append(0)
else:
forces.append(o.data["tangent_force"])
energies = np.array(energies)
energies -= energies[0]
forces = np.array(forces)
self.r = np.array(r)
self.energies = energies
self.forces = forces
# We do a piecewise interpolation between the points. Each spline (
# cubic by default) is constrained by the boundary conditions of the
# energies and the tangent force, i.e., the derivative of
# the energy at each pair of points.
self.spline = PiecewisePolynomial(
self.r, np.array([self.energies, -self.forces]).T,
orders=interpolation_order)
def get_extrema(self, normalize_rxn_coordinate=True):
"""
Returns the positions of the extrema along the MEP. Both local
minimums and maximums are returned.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
Returns:
(min_extrema, max_extrema), where the extrema are given as
[(x1, y1), (x2, y2), ...].
"""
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
min_extrema = []
max_extrema = []
for i in range(1, len(x) - 1):
if y[i] < y[i-1] and y[i] < y[i+1]:
min_extrema.append((x[i] * scale, y[i]))
elif y[i] > y[i-1] and y[i] > y[i+1]:
max_extrema.append((x[i] * scale, y[i]))
return min_extrema, max_extrema
def get_plot(self, normalize_rxn_coordinate=True, label_barrier=True):
"""
Returns the NEB plot. Uses Henkelman's approach of spline fitting
each section of the reaction path based on tangent force and energies.
Args:
normalize_rxn_coordinate (bool): Whether to normalize the
reaction coordinate to between 0 and 1. Defaults to True.
label_barrier (bool): Whether to label the maximum barrier.
Returns:
matplotlib.pyplot object.
"""
plt = get_publication_quality_plot(12, 8)
scale = 1 if not normalize_rxn_coordinate else 1 / self.r[-1]
x = np.arange(0, np.max(self.r), 0.01)
y = self.spline(x) * 1000
plt.plot(x * scale, y, 'k-', linewidth=2)
plt.plot(self.r * scale, self.energies * 1000, 'ro', markersize=10)
plt.xlabel("Reaction coordinate")
plt.ylabel("Energy (meV)")
plt.ylim((np.min(y) - 10, np.max(y) * 1.02 + 20))
if label_barrier:
data = zip(x * scale, y)
barrier = max(data, key=lambda d: d[1])
plt.plot([0, barrier[0]], [barrier[1], barrier[1]], 'k--')
plt.annotate('%.0f meV' % barrier[1],
xy=(barrier[0] / 2, barrier[1] * 1.02),
xytext=(barrier[0] / 2, barrier[1] * 1.02),
horizontalalignment='center')
plt.tight_layout()
return plt
@classmethod
def from_dir(cls, root_dir, relaxation_dirs=None):
"""
Initializes a NEBAnalysis object from a directory of a NEB run.
Note that OUTCARs must be present in all image directories. For the
terminal OUTCARs from relaxation calculations, you can specify the
        locations using relaxation_dirs. If these are not specified, the code
will attempt to look for the OUTCARs in 00 and 0n directories,
followed by subdirs "start", "end" or "initial", "final" in the
root_dir. These are just some typical conventions used
preferentially in Shyue Ping's MAVRL research group. For the
non-terminal points, the CONTCAR is read to obtain structures. For
terminal points, the POSCAR is used. The image directories are
assumed to be the only directories that can be resolved to integers.
E.g., "00", "01", "02", "03", "04", "05", "06". The minimum
sub-directory structure that can be parsed is of the following form (
a 5-image example is shown):
00:
- POSCAR
- OUTCAR
01, 02, 03, 04, 05:
- CONTCAR
- OUTCAR
06:
- POSCAR
- OUTCAR
Args:
root_dir (str): Path to the root directory of the NEB calculation.
relaxation_dirs (tuple): This specifies the starting and ending
relaxation directories from which the OUTCARs are read for the
terminal points for the energies.
Returns:
NEBAnalysis object.
"""
neb_dirs = []
for d in os.listdir(root_dir):
pth = os.path.join(root_dir, d)
if os.path.isdir(pth) and d.isdigit():
i = int(d)
neb_dirs.append((i, pth))
neb_dirs = sorted(neb_dirs, key=lambda d: d[0])
outcars = []
structures = []
# Setup the search sequence for the OUTCARs for the terminal
# directories.
terminal_dirs = []
if relaxation_dirs is not None:
terminal_dirs.append(relaxation_dirs)
terminal_dirs.append((neb_dirs[0][1], neb_dirs[-1][1]))
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["start", "end"]])
terminal_dirs.append([os.path.join(root_dir, d)
for d in ["initial", "final"]])
for i, d in neb_dirs:
outcar = glob.glob(os.path.join(d, "OUTCAR*"))
contcar = glob.glob(os.path.join(d, "CONTCAR*"))
poscar = glob.glob(os.path.join(d, "POSCAR*"))
terminal = i == 0 or i == neb_dirs[-1][0]
if terminal:
found = False
for ds in terminal_dirs:
od = ds[0] if i == 0 else ds[1]
outcar = glob.glob(os.path.join(od, "OUTCAR*"))
if outcar:
outcar = sorted(outcar)
outcars.append(Outcar(outcar[-1]))
found = True
break
if not found:
raise ValueError("OUTCAR cannot be found for terminal "
"point %s" % d)
structures.append(Poscar.from_file(poscar[0]).structure)
else:
outcars.append(Outcar(outcar[0]))
structures.append(Poscar.from_file(contcar[0]).structure)
return NEBAnalysis(outcars, structures)
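
if __name__ == "__main__":
    # Usage sketch (illustrative; the directory path is a placeholder):
    # analyze a completed NEB run laid out as described in from_dir's
    # docstring, then report the extrema and save the barrier plot.
    neb = NEBAnalysis.from_dir("/path/to/neb_run")
    minima, maxima = neb.get_extrema()
    print("Found %d minima and %d maxima along the MEP" % (len(minima), len(maxima)))
    plt = neb.get_plot()
    plt.savefig("neb_barrier.png")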
| ctoher/pymatgen | pymatgen/analysis/transition_state.py | Python | mit | 9,167 | ["pymatgen"] | 982cfe976f9b5670bac18a30a2244ea1a15c72afc95fe99a40eff9c5880b79c8 |
|
# Copyright 2013 anthony cantor
# This file is part of pyc.
#
# pyc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyc. If not, see <http://www.gnu.org/licenses/>.
from pyc_astvisitor import ASTTxformer
import pyc_astvisitor
import pyc_vis
from pyc_log import *
import pyc_gen_name
from pyc_validator import assert_valid
import pyc_localize
from pyc_ir_nodes import *
import pyc_constants
import pyc_ir
import pyc_lineage
import ast
import copy
class Lamb(IRNode):
def __init__(self, **kwargs):
IRNode.__init__(
self,
tuple(['lamb', 'from_node']),
**kwargs
)
class Heapifier(ASTTxformer):
def __init__(self):
ASTTxformer.__init__(self)
self.lamb_nodes = []
self.mappings = {}
def visit_Module(self, node):
heap_vars = {}
locs = pyc_localize.locals(node)
self.log("locals: %r" % locs)
result = [pyc_vis.visit(self, n, heap_vars, locs) for n in node.body]
inits = self.init_local_heap_vars(locs, set([]), heap_vars)
self.log("heapify result: %r" % heap_vars)
self.patch_lamb_nodes()
return ast.Module(
body = inits + result
)
def heapify_name(self, node, new_name):
return pyc_ir.txform(make_subn(new_name, node.ctx.__class__, 0))
def heapify_switch(self, node, heap_vars):
if node.id in heap_vars:
return self.heapify_name(node, heap_vars[node.id])
else:
return copy_name(node)
def visit_Name(self, node, heap_vars, locals):
if node.id in pyc_constants.internal_names \
or node.id in pyc_constants.predefined_vars \
or node.id[0:2] == "ir":
return copy_name(node)
elif node.id in heap_vars or node.id not in locals:
heap_vars[node.id] = heap_name(node.id)
self.mappings[heap_vars[node.id]] = node.id
self.log(self.depth_fmt("heap: %s" % node.id))
return self.heapify_name(node, heap_vars[node.id])
else:
self.log(self.depth_fmt("defer: %s" % node.id))
exp = NameWrap(value=Lamb(
lamb = lambda : self.heapify_switch(node, heap_vars),
from_node = node
))
self.lamb_nodes.append(exp)
#we will edit this node later by invoking the lambda
return exp
def init_heap_var(self, hv, value):
return make_assign(
var_set(hv),
pyc_ir.txform(
ast.List(
elts = [value],
ctx = ast.Load()
)
)
)
def init_local_heap_vars(self, locals, params, heap_vars):
inits = []
for name in locals:
if name not in heap_vars:
continue
val = var_ref(name) if name in params else false_node()
inits.append(self.init_heap_var(heap_vars[name], val))
return inits
def visit_Bloc(self, node, heap_vars, dummy):
locals = pyc_localize.locals(node)
self.log(self.depth_fmt("locals: %r" % locals) )
prms = pyc_astvisitor.names(node.args)
self.log(self.depth_fmt("params: %r" % prms) )
result_body = [
pyc_vis.visit(self, n, heap_vars, locals) for n in node.body
]
        # Pass all params as the locals and an empty heap_vars dict: none of
        # the function-argument references should be "heapified", since we
        # copy them into new heapified vars before the function body. Due to
        # the pyc_vis calls above, all references to these parameters have
        # already been converted to the new heapified versions, which we
        # initialize after we patch the lambda nodes.
result_args = pyc_vis.visit(self, node.args, {}, prms )
inits = self.init_local_heap_vars(locals, prms, heap_vars)
return Bloc(
args = result_args,
body = inits + result_body
)
def patch_lamb_nodes(self):
log("patch lamb nodes:")
for expr in self.lamb_nodes:
new_val = expr.value.lamb()
pyc_lineage.bequeath_lineage(expr.value.from_node, new_val, self.__class__.__name__)
expr.value = new_val
log(" ->%s" % ast.dump(expr.value))
def heap_name(name):
return ("heap_%s" % name)
def txform(as_tree, **kwargs):
v = Heapifier()
v.log = lambda s: log("Heapifier : %s" % s)
if 'tracer' in kwargs:
v.tracer = kwargs['tracer']
return (pyc_vis.walk(v, as_tree), v.mappings)
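
if __name__ == "__main__":
    # Usage sketch (illustrative; assumes the tree has already been through
    # the earlier pyc passes this transform expects, and takes the input
    # file name from the command line):
    import sys
    tree = ast.parse(open(sys.argv[1]).read())
    heapified, mappings = txform(tree)
    print(ast.dump(heapified))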
| cantora/pyc | pyc_heapify.py | Python | gpl-3.0 | 4,434 | ["VisIt"] | efe340f231109733176adba4eafee03700dc52aab580bdd2023e06de69b1fd96 |
|
#!/usr/bin/env python
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import cmd
import glob
import os
import time
import sys
import subprocess
import traceback
import codecs
import shlex
import argparse
import locale
import printcore
from printrun.printrun_utils import install_locale
install_locale('pronterface')
from printrun import gcoder
from functools import wraps
if os.name == "nt":
try:
import _winreg
except:
pass
READLINE = True
try:
import readline
try:
readline.rl.mode.show_all_if_ambiguous = "on" # config pyreadline on windows
except:
pass
except:
READLINE = False # neither readline module is available
def dosify(name):
return os.path.split(name)[1].split(".")[0][:8] + ".g"
def setting_add_tooltip(func):
@wraps(func)
def decorator(self, *args, **kwargs):
widget = func(self, *args, **kwargs)
helptxt = self.help or ""
sep, deftxt = "", ""
if len(helptxt):
sep = "\n"
if helptxt.find("\n") >= 0:
sep = "\n\n"
        if self.default != "":
deftxt = _("Default: ")
resethelp = _("(Control-doubleclick to reset to default value)")
if len(repr(self.default)) > 10:
deftxt += "\n " + repr(self.default).strip("'") + "\n" + resethelp
else:
deftxt += repr(self.default) + " " + resethelp
helptxt += sep + deftxt
if len(helptxt):
widget.SetToolTipString(helptxt)
return widget
return decorator
class Setting(object):
DEFAULT_GROUP = "Printer"
hidden = False
def __init__(self, name, default, label = None, help = None, group = None):
self.name = name
self.default = default
self._value = default
self.label = label
self.help = help
self.group = group if group else Setting.DEFAULT_GROUP
def _get_value(self):
return self._value
def _set_value(self, value):
raise NotImplementedError
value = property(_get_value, _set_value)
def set_default(self, e):
import wx
        if e.CmdDown() and e.ButtonDClick() and self.default != "":
confirmation = wx.MessageDialog(None, _("Are you sure you want to reset the setting to the default value: {0!r} ?").format(self.default), _("Confirm set default"), wx.ICON_EXCLAMATION | wx.YES_NO | wx.NO_DEFAULT)
if confirmation.ShowModal() == wx.ID_YES:
self._set_value(self.default)
else:
e.Skip()
@setting_add_tooltip
def get_label(self, parent):
import wx
widget = wx.StaticText(parent, -1, self.label or self.name)
widget.set_default = self.set_default
return widget
@setting_add_tooltip
def get_widget(self, parent):
return self.get_specific_widget(parent)
def get_specific_widget(self, parent):
raise NotImplementedError
def update(self):
raise NotImplementedError
def __str__(self):
return self.name
def __repr__(self):
return self.name
class HiddenSetting(Setting):
hidden = True
def _set_value(self, value):
self._value = value
value = property(Setting._get_value, _set_value)
class wxSetting(Setting):
widget = None
def _set_value(self, value):
self._value = value
if self.widget:
self.widget.SetValue(value)
value = property(Setting._get_value, _set_value)
def update(self):
self.value = self.widget.GetValue()
class StringSetting(wxSetting):
def get_specific_widget(self, parent):
import wx
self.widget = wx.TextCtrl(parent, -1, str(self.value))
return self.widget
class ComboSetting(wxSetting):
def __init__(self, name, default, choices, label = None, help = None, group = None):
super(ComboSetting, self).__init__(name, default, label, help, group)
self.choices = choices
def get_specific_widget(self, parent):
import wx
self.widget = wx.ComboBox(parent, -1, str(self.value), choices = self.choices, style = wx.CB_DROPDOWN)
return self.widget
class SpinSetting(wxSetting):
def __init__(self, name, default, min, max, label = None, help = None, group = None):
super(SpinSetting, self).__init__(name, default, label, help, group)
self.min = min
self.max = max
def get_specific_widget(self, parent):
import wx
self.widget = wx.SpinCtrl(parent, -1, min = self.min, max = self.max)
self.widget.SetValue(self.value)
return self.widget
class FloatSpinSetting(SpinSetting):
def get_specific_widget(self, parent):
from wx.lib.agw.floatspin import FloatSpin
self.widget = FloatSpin(parent, -1, value = self.value, min_val = self.min, max_val = self.max, digits = 2)
return self.widget
class BooleanSetting(wxSetting):
def _get_value(self):
return bool(self._value)
def _set_value(self, value):
self._value = value
if self.widget:
self.widget.SetValue(bool(value))
value = property(_get_value, _set_value)
def get_specific_widget(self, parent):
import wx
self.widget = wx.CheckBox(parent, -1)
self.widget.SetValue(bool(self.value))
return self.widget
class StaticTextSetting(wxSetting):
def __init__(self, name, label = " ", text = "", help = None, group = None):
super(StaticTextSetting, self).__init__(name, "", label, help, group)
self.text = text
def update(self):
pass
def _get_value(self):
return ""
def _set_value(self, value):
pass
def get_specific_widget(self, parent):
import wx
self.widget = wx.StaticText(parent, -1, self.text)
return self.widget
class Settings(object):
#def _temperature_alias(self): return {"pla":210, "abs":230, "off":0}
#def _temperature_validate(self, v):
# if v < 0: raise ValueError("You cannot set negative temperatures. To turn the hotend off entirely, set its temperature to 0.")
#def _bedtemperature_alias(self): return {"pla":60, "abs":110, "off":0}
def _baudrate_list(self): return ["2400", "9600", "19200", "38400", "57600", "115200", "250000"]
def __init__(self):
# defaults here.
# the initial value determines the type
self._add(StringSetting("port", "", _("Serial port"), _("Port used to communicate with printer")))
self._add(ComboSetting("baudrate", 115200, self._baudrate_list(), _("Baud rate"), _("Communications Speed")))
self._add(SpinSetting("bedtemp_abs", 110, 0, 400, _("Bed temperature for ABS"), _("Heated Build Platform temp for ABS (deg C)"), "Printer"))
self._add(SpinSetting("bedtemp_pla", 60, 0, 400, _("Bed temperature for PLA"), _("Heated Build Platform temp for PLA (deg C)"), "Printer"))
self._add(SpinSetting("temperature_abs", 230, 0, 400, _("Extruder temperature for ABS"), _("Extruder temp for ABS (deg C)"), "Printer"))
self._add(SpinSetting("temperature_pla", 185, 0, 400, _("Extruder temperature for PLA"), _("Extruder temp for PLA (deg C)"), "Printer"))
self._add(SpinSetting("xy_feedrate", 3000, 0, 50000, _("X && Y manual feedrate"), _("Feedrate for Control Panel Moves in X and Y (mm/min)"), "Printer"))
self._add(SpinSetting("z_feedrate", 200, 0, 50000, _("Z manual feedrate"), _("Feedrate for Control Panel Moves in Z (mm/min)"), "Printer"))
self._add(SpinSetting("e_feedrate", 100, 0, 1000, _("E manual feedrate"), _("Feedrate for Control Panel Moves in Extrusions (mm/min)"), "Printer"))
self._add(StringSetting("slicecommand", "python skeinforge/skeinforge_application/skeinforge_utilities/skeinforge_craft.py $s", _("Slice command"), _("Slice command"), "External"))
self._add(StringSetting("sliceoptscommand", "python skeinforge/skeinforge_application/skeinforge.py", _("Slicer options command"), _("Slice settings command"), "External"))
self._add(StringSetting("final_command", "", _("Final command"), _("Executable to run when the print is finished"), "External"))
self._add(HiddenSetting("project_offset_x", 0.0))
self._add(HiddenSetting("project_offset_y", 0.0))
self._add(HiddenSetting("project_interval", 2.0))
self._add(HiddenSetting("project_pause", 2.5))
self._add(HiddenSetting("project_scale", 1.0))
self._add(HiddenSetting("project_x", 1024.0))
self._add(HiddenSetting("project_y", 768.0))
self._add(HiddenSetting("project_projected_x", 150.0))
self._add(HiddenSetting("project_direction", "Top Down"))
self._add(HiddenSetting("project_overshoot", 3.0))
self._add(HiddenSetting("project_z_axis_rate", 200))
self._add(HiddenSetting("project_layer", 0.1))
self._add(HiddenSetting("project_prelift_gcode", ""))
self._add(HiddenSetting("project_postlift_gcode", ""))
self._add(HiddenSetting("pause_between_prints", True))
_settings = []
def __setattr__(self, name, value):
if name.startswith("_"):
return object.__setattr__(self, name, value)
if isinstance(value, Setting):
if not value.hidden:
self._settings.append(value)
object.__setattr__(self, "_" + name, value)
elif hasattr(self, "_" + name):
getattr(self, "_" + name).value = value
else:
setattr(self, name, StringSetting(name = name, default = value))
def __getattr__(self, name):
if name.startswith("_"):
return object.__getattribute__(self, name)
return getattr(self, "_" + name).value
def _add(self, setting, callback = None):
setattr(self, setting.name, setting)
if callback:
setattr(self, "_" + setting.name + "_cb", callback)
def _set(self, key, value):
try:
value = getattr(self, "_%s_alias" % key)()[value]
except KeyError:
pass
except AttributeError:
pass
try:
getattr(self, "_%s_validate" % key)(value)
except AttributeError:
pass
t = type(getattr(self, key))
if t == bool and value == "False": setattr(self, key, False)
else: setattr(self, key, t(value))
try:
getattr(self, "_%s_cb" % key)(key, value)
except AttributeError:
pass
return value
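    # Hook protocol (descriptive): _set and _tabcomplete look up optional
    # per-setting helpers by name. _<key>_alias() maps keyword values (see
    # the commented-out _temperature_alias example above), _<key>_validate(v)
    # may raise ValueError, _<key>_cb(key, value) runs after assignment, and
    # _<key>_list() supplies tab-completion candidates.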
def _tabcomplete(self, key):
try:
return getattr(self, "_%s_list" % key)()
except AttributeError:
pass
try:
return getattr(self, "_%s_alias" % key)().keys()
except AttributeError:
pass
return []
def _all_settings(self):
return self._settings
class Status:
def __init__(self):
self.extruder_temp = 0
self.extruder_temp_target = 0
self.bed_temp = 0
self.bed_temp_target = 0
self.print_job = None
self.print_job_progress = 1.0
def update_tempreading(self, tempstr):
r = tempstr.split()
# eg. r = ["ok", "T:20.5", "/0.0", "B:0.0", "/0.0", "@:0"]
if len(r) == 6:
self.extruder_temp = float(r[1][2:])
self.extruder_temp_target = float(r[2][1:])
self.bed_temp = float(r[3][2:])
self.bed_temp_target = float(r[4][1:])
@property
def bed_enabled(self):
return self.bed_temp != 0
@property
def extruder_enabled(self):
return self.extruder_temp != 0
class pronsole(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
if not READLINE:
self.completekey = None
self.status = Status()
self.dynamic_temp = False
self.p = printcore.printcore()
self.p.recvcb = self.recvcb
self.recvlisteners = []
self.in_macro = False
self.p.onlinecb = self.online
self.fgcode = None
self.listing = 0
self.sdfiles = []
self.paused = False
self.sdprinting = 0
self.temps = {"pla": "185", "abs": "230", "off": "0"}
self.bedtemps = {"pla": "60", "abs": "110", "off": "0"}
self.percentdone = 0
self.tempreadings = ""
self.macros = {}
self.rc_loaded = False
self.processing_rc = False
self.processing_args = False
self.settings = Settings()
self.settings._port_list = self.scanserial
self.settings._temperature_abs_cb = self.set_temp_preset
self.settings._temperature_pla_cb = self.set_temp_preset
self.settings._bedtemp_abs_cb = self.set_temp_preset
self.settings._bedtemp_pla_cb = self.set_temp_preset
self.monitoring = 0
self.silent = False
self.commandprefixes = 'MGT$'
self.promptstrs = {"offline": "%(bold)suninitialized>%(normal)s ",
"fallback": "%(bold)sPC>%(normal)s ",
"macro": "%(bold)s..>%(normal)s ",
"online": "%(bold)sT:%(extruder_temp_fancy)s %(progress_fancy)s >%(normal)s "}
def confirm(self):
y_or_n = raw_input("y/n: ")
if y_or_n == "y":
return True
elif y_or_n != "n":
return self.confirm()
return False
def log(self, *msg):
print u"".join(unicode(i) for i in msg)
def logError(self, *msg):
print u"".join(unicode(i) for i in msg)
def promptf(self):
"""A function to generate prompts so that we can do dynamic prompts. """
if self.in_macro:
promptstr = self.promptstrs["macro"]
elif not self.p.online:
promptstr = self.promptstrs["offline"]
elif self.status.extruder_enabled:
promptstr = self.promptstrs["online"]
else:
promptstr = self.promptstrs["fallback"]
if not "%" in promptstr:
return promptstr
else:
specials = {}
specials["extruder_temp"] = str(int(self.status.extruder_temp))
specials["extruder_temp_target"] = str(int(self.status.extruder_temp_target))
if self.status.extruder_temp_target == 0:
specials["extruder_temp_fancy"] = str(int(self.status.extruder_temp))
else:
specials["extruder_temp_fancy"] = "%s/%s" % (str(int(self.status.extruder_temp)), str(int(self.status.extruder_temp_target)))
if self.p.printing:
progress = int(1000 * float(self.p.queueindex) / len(self.p.mainqueue)) / 10
elif self.sdprinting:
progress = self.percentdone
else:
progress = 0.0
specials["progress"] = str(progress)
if self.p.printing or self.sdprinting:
specials["progress_fancy"] = str(progress) + "%"
else:
specials["progress_fancy"] = "?%"
specials["bold"] = "\033[01m"
specials["normal"] = "\033[00m"
return promptstr % specials
def postcmd(self, stop, line):
""" A hook we override to generate prompts after
each command is executed, for the next prompt.
We also use it to send M105 commands so that
temp info gets updated for the prompt."""
if self.p.online and self.dynamic_temp:
self.p.send_now("M105")
self.prompt = self.promptf()
return stop
def set_temp_preset(self, key, value):
if not key.startswith("bed"):
self.temps["pla"] = str(self.settings.temperature_pla)
self.temps["abs"] = str(self.settings.temperature_abs)
self.log("Hotend temperature presets updated, pla:%s, abs:%s" % (self.temps["pla"], self.temps["abs"]))
else:
self.bedtemps["pla"] = str(self.settings.bedtemp_pla)
self.bedtemps["abs"] = str(self.settings.bedtemp_abs)
self.log("Bed temperature presets updated, pla:%s, abs:%s" % (self.bedtemps["pla"], self.bedtemps["abs"]))
def scanserial(self):
"""scan for available ports. return a list of device names."""
baselist = []
if os.name == "nt":
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, "HARDWARE\\DEVICEMAP\\SERIALCOMM")
i = 0
while(1):
baselist += [_winreg.EnumValue(key, i)[1]]
i += 1
except:
pass
for g in ['/dev/ttyUSB*', '/dev/ttyACM*', "/dev/tty.*", "/dev/cu.*", "/dev/rfcomm*"]:
baselist += glob.glob(g)
return filter(self._bluetoothSerialFilter, baselist)
def _bluetoothSerialFilter(self, serial):
return not ("Bluetooth" in serial or "FireFly" in serial)
def online(self):
self.log("\rPrinter is now online")
self.write_prompt()
def write_prompt(self):
sys.stdout.write(self.promptf())
sys.stdout.flush()
def help_help(self, l):
self.do_help("")
def do_gcodes(self, l):
self.help_gcodes()
def help_gcodes(self):
self.log("Gcodes are passed through to the printer as they are")
def complete_macro(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.macros.keys() if i.startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
return [i for i in ["/D", "/S"] + self.completenames(text) if i.startswith(text)]
else:
return []
def hook_macro(self, l):
l = l.rstrip()
ls = l.lstrip()
ws = l[:len(l) - len(ls)] # just leading whitespace
if len(ws) == 0:
self.end_macro()
# pass the unprocessed line to regular command processor to not require empty line in .pronsolerc
return self.onecmd(l)
self.cur_macro_def += l + "\n"
def end_macro(self):
if "onecmd" in self.__dict__: del self.onecmd # remove override
self.in_macro = False
self.prompt = self.promptf()
if self.cur_macro_def != "":
self.macros[self.cur_macro_name] = self.cur_macro_def
macro = self.compile_macro(self.cur_macro_name, self.cur_macro_def)
setattr(self.__class__, "do_" + self.cur_macro_name, lambda self, largs, macro = macro: macro(self, *largs.split()))
setattr(self.__class__, "help_" + self.cur_macro_name, lambda self, macro_name = self.cur_macro_name: self.subhelp_macro(macro_name))
if not self.processing_rc:
self.log("Macro '" + self.cur_macro_name + "' defined")
# save it
if not self.processing_args:
macro_key = "macro " + self.cur_macro_name
macro_def = macro_key
if "\n" in self.cur_macro_def:
macro_def += "\n"
else:
macro_def += " "
macro_def += self.cur_macro_def
self.save_in_rc(macro_key, macro_def)
else:
self.logError("Empty macro - cancelled")
del self.cur_macro_name, self.cur_macro_def
def parseusercmd(self, line):
pass
def compile_macro_line(self, line):
line = line.rstrip()
ls = line.lstrip()
ws = line[:len(line) - len(ls)] # just leading whitespace
if ls == "" or ls.startswith('#'): return "" # no code
if ls.startswith('!'):
return ws + ls[1:] + "\n" # python mode
else:
ls = ls.replace('"', '\\"') # need to escape double quotes
ret = ws + 'self.parseusercmd("' + ls + '".format(*arg))\n' # parametric command mode
return ret + ws + 'self.onecmd("' + ls + '".format(*arg))\n'
def compile_macro(self, macro_name, macro_def):
if macro_def.strip() == "":
self.logError("Empty macro - cancelled")
return
pycode = "def macro(self,*arg):\n"
if "\n" not in macro_def.strip():
pycode += self.compile_macro_line(" " + macro_def.strip())
else:
lines = macro_def.split("\n")
for l in lines:
pycode += self.compile_macro_line(l)
exec pycode
return macro
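    # Illustrative expansion: a single-line macro definition "G28" compiles
    # (via compile_macro_line) to roughly:
    #   def macro(self, *arg):
    #       self.parseusercmd("G28".format(*arg))
    #       self.onecmd("G28".format(*arg))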
def start_macro(self, macro_name, prev_definition = "", suppress_instructions = False):
if not self.processing_rc and not suppress_instructions:
self.logError("Enter macro using indented lines, end with empty line")
self.cur_macro_name = macro_name
self.cur_macro_def = ""
self.onecmd = self.hook_macro # override onecmd temporarily
        self.in_macro = True  # we are now defining a macro; end_macro() resets this
self.prompt = self.promptf()
def delete_macro(self, macro_name):
if macro_name in self.macros.keys():
delattr(self.__class__, "do_" + macro_name)
del self.macros[macro_name]
self.log("Macro '" + macro_name + "' removed")
if not self.processing_rc and not self.processing_args:
self.save_in_rc("macro " + macro_name, "")
else:
self.logError("Macro '" + macro_name + "' is not defined")
def do_macro(self, args):
if args.strip() == "":
self.print_topics("User-defined macros", map(str, self.macros.keys()), 15, 80)
return
arglist = args.split(None, 1)
macro_name = arglist[0]
if macro_name not in self.macros and hasattr(self.__class__, "do_" + macro_name):
self.logError("Name '" + macro_name + "' is being used by built-in command")
return
if len(arglist) == 2:
macro_def = arglist[1]
if macro_def.lower() == "/d":
self.delete_macro(macro_name)
return
if macro_def.lower() == "/s":
self.subhelp_macro(macro_name)
return
self.cur_macro_def = macro_def
self.cur_macro_name = macro_name
self.end_macro()
return
if macro_name in self.macros:
self.start_macro(macro_name, self.macros[macro_name])
else:
self.start_macro(macro_name)
def help_macro(self):
self.log("Define single-line macro: macro <name> <definition>")
self.log("Define multi-line macro: macro <name>")
self.log("Enter macro definition in indented lines. Use {0} .. {N} to substitute macro arguments")
self.log("Enter python code, prefixed with ! Use arg[0] .. arg[N] to substitute macro arguments")
self.log("Delete macro: macro <name> /d")
self.log("Show macro definition: macro <name> /s")
self.log("'macro' without arguments displays list of defined macros")
def subhelp_macro(self, macro_name):
if macro_name in self.macros.keys():
macro_def = self.macros[macro_name]
if "\n" in macro_def:
self.log("Macro '" + macro_name + "' defined as:")
self.log(self.macros[macro_name] + "----------------")
else:
self.log("Macro '" + macro_name + "' defined as: '" + macro_def + "'")
else:
self.logError("Macro '" + macro_name + "' is not defined")
def set(self, var, str):
try:
t = type(getattr(self.settings, var))
value = self.settings._set(var, str)
if not self.processing_rc and not self.processing_args:
self.save_in_rc("set " + var, "set %s %s" % (var, value))
except AttributeError:
self.logError("Unknown variable '%s'" % var)
except ValueError, ve:
self.logError("Bad value for variable '%s', expecting %s (%s)" % (var, repr(t)[1:-1], ve.args[0]))
def do_set(self, argl):
args = argl.split(None, 1)
if len(args) < 1:
for k in [kk for kk in dir(self.settings) if not kk.startswith("_")]:
self.log("%s = %s" % (k, str(getattr(self.settings, k))))
return
if len(args) < 2:
try:
self.log("%s = %s" % (args[0], getattr(self.settings, args[0])))
except AttributeError:
self.logError("Unknown variable '%s'" % args[0])
return
self.set(args[0], args[1])
def help_set(self):
self.log("Set variable: set <variable> <value>")
self.log("Show variable: set <variable>")
self.log("'set' without arguments displays all variables")
def complete_set(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in dir(self.settings) if not i.startswith("_") and i.startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
return [i for i in self.settings._tabcomplete(line.split()[1]) if i.startswith(text)]
else:
return []
def postloop(self):
self.p.disconnect()
cmd.Cmd.postloop(self)
def load_rc(self, rc_filename):
self.processing_rc = True
try:
rc = codecs.open(rc_filename, "r", "utf-8")
self.rc_filename = os.path.abspath(rc_filename)
for rc_cmd in rc:
if not rc_cmd.lstrip().startswith("#"):
self.onecmd(rc_cmd)
rc.close()
if hasattr(self, "cur_macro_def"):
self.end_macro()
self.rc_loaded = True
finally:
self.processing_rc = False
def load_default_rc(self, rc_filename = ".pronsolerc"):
if rc_filename == ".pronsolerc" and hasattr(sys, "frozen") and sys.frozen in ["windows_exe", "console_exe"]:
rc_filename = "printrunconf.ini"
try:
try:
self.load_rc(os.path.join(os.path.expanduser("~"), rc_filename))
except IOError:
self.load_rc(rc_filename)
except IOError:
# make sure the filename is initialized
self.rc_filename = os.path.abspath(os.path.join(os.path.expanduser("~"), rc_filename))
def save_in_rc(self, key, definition):
"""
Saves or updates macro or other definitions in .pronsolerc
key is prefix that determines what is being defined/updated (e.g. 'macro foo')
definition is the full definition (that is written to file). (e.g. 'macro foo move x 10')
Set key as empty string to just add (and not overwrite)
Set definition as empty string to remove it from .pronsolerc
To delete line from .pronsolerc, set key as the line contents, and definition as empty string
Only first definition with given key is overwritten.
Updates are made in the same file position.
Additions are made to the end of the file.
"""
rci, rco = None, None
if definition != "" and not definition.endswith("\n"):
definition += "\n"
try:
written = False
if os.path.exists(self.rc_filename):
import shutil
shutil.copy(self.rc_filename, self.rc_filename + "~bak")
rci = codecs.open(self.rc_filename + "~bak", "r", "utf-8")
rco = codecs.open(self.rc_filename, "w", "utf-8")
if rci is not None:
overwriting = False
for rc_cmd in rci:
l = rc_cmd.rstrip()
ls = l.lstrip()
ws = l[:len(l) - len(ls)] # just leading whitespace
if overwriting and len(ws) == 0:
overwriting = False
if not written and key != "" and rc_cmd.startswith(key) and (rc_cmd + "\n")[len(key)].isspace():
overwriting = True
written = True
rco.write(definition)
if not overwriting:
rco.write(rc_cmd)
if not rc_cmd.endswith("\n"): rco.write("\n")
if not written:
rco.write(definition)
if rci is not None:
rci.close()
rco.close()
#if definition != "":
# self.log("Saved '"+key+"' to '"+self.rc_filename+"'")
#else:
# self.log("Removed '"+key+"' from '"+self.rc_filename+"'")
except Exception, e:
self.logError("Saving failed for ", key + ":", str(e))
finally:
del rci, rco
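    # Illustrative calls (the values are placeholders), matching the
    # docstring above:
    #   self.save_in_rc("macro foo", "macro foo move x 10")  # define/overwrite 'macro foo'
    #   self.save_in_rc("macro foo", "")                     # remove 'macro foo' from the rc file
    #   self.save_in_rc("", "set port /dev/ttyUSB0")         # append without overwriting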
def preloop(self):
self.log("Welcome to the printer console! Type \"help\" for a list of available commands.")
self.prompt = self.promptf()
cmd.Cmd.preloop(self)
def do_connect(self, l):
a = l.split()
p = self.scanserial()
port = self.settings.port
if (port == "" or port not in p) and len(p) > 0:
port = p[0]
baud = self.settings.baudrate or 115200
if len(a) > 0:
port = a[0]
if len(a) > 1:
try:
baud = int(a[1])
except:
self.log("Bad baud value '" + a[1] + "' ignored")
if len(p) == 0 and not port:
self.log("No serial ports detected - please specify a port")
return
if len(a) == 0:
self.log("No port specified - connecting to %s at %dbps" % (port, baud))
if port != self.settings.port:
self.settings.port = port
self.save_in_rc("set port", "set port %s" % port)
if baud != self.settings.baudrate:
self.settings.baudrate = baud
self.save_in_rc("set baudrate", "set baudrate %d" % baud)
self.p.connect(port, baud)
def help_connect(self):
self.log("Connect to printer")
self.log("connect <port> <baudrate>")
self.log("If port and baudrate are not specified, connects to first detected port at 115200bps")
ports = self.scanserial()
if(len(ports)):
self.log("Available ports: ", " ".join(ports))
else:
self.log("No serial ports were automatically found.")
def complete_connect(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.scanserial() if i.startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
return [i for i in ["2400", "9600", "19200", "38400", "57600", "115200"] if i.startswith(text)]
else:
return []
def do_disconnect(self, l):
self.p.disconnect()
def help_disconnect(self):
self.log("Disconnects from the printer")
def do_load(self, filename):
self._do_load(filename)
def _do_load(self, filename):
if not filename:
self.logError("No file name given.")
return
self.logError("Loading file: " + filename)
if not os.path.exists(filename):
self.logError("File not found!")
return
self.fgcode = gcoder.GCode(open(filename))
self.filename = filename
self.log("Loaded %s, %d lines." % (filename, len(self.fgcode)))
def complete_load(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
else:
return glob.glob("*/") + glob.glob("*.g*")
def help_load(self):
self.log("Loads a gcode file (with tab-completion)")
def do_upload(self, l):
names = l.split()
if len(names) == 2:
filename = names[0]
targetname = names[1]
else:
self.logError(_("Please enter target name in 8.3 format."))
return
if not self.p.online:
self.logError(_("Not connected to printer."))
return
self._do_load(filename)
self.log(_("Uploading as %s") % targetname)
self.log(_("Uploading %s") % self.filename)
self.p.send_now("M28 " + targetname)
self.log(_("Press Ctrl-C to interrupt upload."))
self.p.startprint(self.fgcode)
try:
sys.stdout.write(_("Progress: ") + "00.0%")
sys.stdout.flush()
time.sleep(1)
while self.p.printing:
time.sleep(1)
sys.stdout.write("\b\b\b\b\b%04.1f%%" % (100 * float(self.p.queueindex) / len(self.p.mainqueue),))
sys.stdout.flush()
self.p.send_now("M29 " + targetname)
            time.sleep(0.2)
self.p.clear = 1
self._do_ls(False)
self.log("\b\b\b\b\b100%.")
self.log(_("Upload completed. %s should now be on the card.") % targetname)
return
except:
self.logError(_("...interrupted!"))
self.p.pause()
self.p.send_now("M29 " + targetname)
time.sleep(0.2)
self.p.clear = 1
self.p.startprint(None)
self.logError(_("A partial file named %s may have been written to the sd card.") % targetname)
def complete_upload(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.g*")]
else:
return glob.glob("*/") + glob.glob("*.g*")
def help_upload(self):
self.log("Uploads a gcode file to the sd card")
def help_print(self):
if not self.fgcode:
self.log(_("Send a loaded gcode file to the printer. Load a file with the load command first."))
else:
self.log(_("Send a loaded gcode file to the printer. You have %s loaded right now.") % self.filename)
def do_print(self, l):
if not self.fgcode:
self.logError(_("No file loaded. Please use load first."))
return
if not self.p.online:
self.logError(_("Not connected to printer."))
return
self.log(_("Printing %s") % self.filename)
self.log(_("You can monitor the print with the monitor command."))
self.p.startprint(self.fgcode)
def do_pause(self, l):
if self.sdprinting:
self.p.send_now("M25")
else:
if not self.p.printing:
self.logError(_("Not printing, cannot pause."))
return
self.p.pause()
self.paused = True
def help_pause(self):
self.log(_("Pauses a running print"))
def pause(self, event):
return self.do_pause(None)
def do_resume(self, l):
if not self.paused:
self.logError(_("Not paused, unable to resume. Start a print first."))
return
self.paused = False
if self.sdprinting:
self.p.send_now("M24")
return
else:
self.p.resume()
def help_resume(self):
self.log(_("Resumes a paused print."))
def emptyline(self):
pass
def do_shell(self, l):
exec(l)
def listfiles(self, line, echo = False):
if "Begin file list" in line:
self.listing = 1
elif "End file list" in line:
self.listing = 0
self.recvlisteners.remove(self.listfiles)
if echo:
self.log(_("Files on SD card:"))
self.log("\n".join(self.sdfiles))
elif self.listing:
self.sdfiles.append(line.strip().lower())
def _do_ls(self, echo):
# FIXME: this was 2, but I think it should rather be 0 as in do_upload
self.listing = 0
self.sdfiles = []
self.recvlisteners.append(lambda l: self.listfiles(l, echo))
self.p.send_now("M20")
def do_ls(self, l):
if not self.p.online:
self.logError(_("Printer is not online. Please connect to it first."))
return
self._do_ls(True)
def help_ls(self):
self.log(_("Lists files on the SD card"))
def waitforsdresponse(self, l):
if "file.open failed" in l:
self.logError(_("Opening file failed."))
self.recvlisteners.remove(self.waitforsdresponse)
return
if "File opened" in l:
self.log(l)
if "File selected" in l:
self.log(_("Starting print"))
self.p.send_now("M24")
self.sdprinting = 1
#self.recvlisteners.remove(self.waitforsdresponse)
return
if "Done printing file" in l:
self.log(l)
self.sdprinting = 0
self.recvlisteners.remove(self.waitforsdresponse)
return
if "SD printing byte" in l:
#M27 handler
try:
resp = l.split()
vals = resp[-1].split("/")
self.percentdone = 100.0 * int(vals[0]) / int(vals[1])
except:
pass
def do_reset(self, l):
self.p.reset()
def help_reset(self):
self.log(_("Resets the printer."))
def do_sdprint(self, l):
if not self.p.online:
self.log(_("Printer is not online. Please connect to it first."))
return
self._do_ls(False)
while self.listfiles in self.recvlisteners:
time.sleep(0.1)
if l.lower() not in self.sdfiles:
self.log(_("File is not present on card. Please upload it first."))
return
self.recvlisteners.append(self.waitforsdresponse)
self.p.send_now("M23 " + l.lower())
self.log(_("Printing file: %s from SD card.") % l.lower())
self.log(_("Requesting SD print..."))
time.sleep(1)
def help_sdprint(self):
self.log(_("Print a file from the SD card. Tab completes with available file names."))
self.log(_("sdprint filename.g"))
def complete_sdprint(self, text, line, begidx, endidx):
if not self.sdfiles and self.p.online:
self._do_ls(False)
while self.listfiles in self.recvlisteners:
time.sleep(0.1)
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.sdfiles if i.startswith(text)]
def recvcb(self, l):
if "T:" in l:
self.tempreadings = l
self.status.update_tempreading(l)
tstring = l.rstrip()
if tstring != "ok" and not self.listing and not self.monitoring:
if tstring[:5] == "echo:":
tstring = tstring[5:].lstrip()
if self.silent is False: print "\r" + tstring.ljust(15)
sys.stdout.write(self.promptf())
sys.stdout.flush()
for i in self.recvlisteners:
i(l)
def help_shell(self):
self.log("Executes a python command. Example:")
self.log("! os.listdir('.')")
def default(self, l):
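        # Dispatch (descriptive): single-letter prefixes in "MGT$" send the
        # line verbatim; their lowercase forms are upcased before sending;
        # a leading "@" is stripped and the rest sent raw; anything else
        # falls through to cmd.Cmd.default (unknown-command handling).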
if l[0] in self.commandprefixes.upper():
if self.p and self.p.online:
if not self.p.loud:
self.log("SENDING:" + l)
self.p.send_now(l)
else:
self.logError(_("Printer is not online."))
return
elif l[0] in self.commandprefixes.lower():
if self.p and self.p.online:
if not self.p.loud:
self.log("SENDING:" + l.upper())
self.p.send_now(l.upper())
else:
self.logError(_("Printer is not online."))
return
elif l[0] == "@":
if self.p and self.p.online:
if not self.p.loud:
self.log("SENDING:" + l[1:])
self.p.send_now(l[1:])
else:
self.logError(_("Printer is not online."))
return
else:
cmd.Cmd.default(self, l)
def tempcb(self, l):
if "T:" in l:
self.log(l.strip().replace("T", "Hotend").replace("B", "Bed").replace("ok ", ""))
def do_gettemp(self, l):
if "dynamic" in l:
self.dynamic_temp = True
if self.p.online:
self.p.send_now("M105")
time.sleep(0.75)
if not self.status.bed_enabled:
print "Hotend: %s/%s" % (self.status.extruder_temp, self.status.extruder_temp_target)
else:
print "Hotend: %s/%s" % (self.status.extruder_temp, self.status.extruder_temp_target)
print "Bed: %s/%s" % (self.status.bed_temp, self.status.bed_temp_target)
def help_gettemp(self):
self.log(_("Read the extruder and bed temperature."))
def do_settemp(self, l):
l = l.lower().replace(", ", ".")
for i in self.temps.keys():
l = l.replace(i, self.temps[i])
try:
f = float(l)
except:
self.logError(_("You must enter a temperature."))
return
if f >= 0:
if f > 250:
print _("%s is a high temperature to set your extruder to. Are you sure you want to do that?") % f
if not self.confirm():
return
if self.p.online:
self.p.send_now("M104 S" + l)
self.log(_("Setting hotend temperature to %s degrees Celsius.") % f)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative temperatures. To turn the hotend off entirely, set its temperature to 0."))
def help_settemp(self):
self.log(_("Sets the hotend temperature to the value entered."))
self.log(_("Enter either a temperature in celsius or one of the following keywords"))
self.log(", ".join([i + "(" + self.temps[i] + ")" for i in self.temps.keys()]))
def complete_settemp(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.temps.keys() if i.startswith(text)]
def do_bedtemp(self, l):
f = None
try:
l = l.lower().replace(", ", ".")
for i in self.bedtemps.keys():
l = l.replace(i, self.bedtemps[i])
f = float(l)
except:
self.logError(_("You must enter a temperature."))
if f is not None and f >= 0:
if self.p.online:
self.p.send_now("M140 S" + l)
self.log(_("Setting bed temperature to %s degrees Celsius.") % f)
else:
self.logError(_("Printer is not online."))
else:
self.logError(_("You cannot set negative temperatures. To turn the bed off entirely, set its temperature to 0."))
def help_bedtemp(self):
self.log(_("Sets the bed temperature to the value entered."))
self.log(_("Enter either a temperature in celsius or one of the following keywords"))
self.log(", ".join([i + "(" + self.bedtemps[i] + ")" for i in self.bedtemps.keys()]))
def complete_bedtemp(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in self.bedtemps.keys() if i.startswith(text)]
def do_move(self, l):
if(len(l.split()) < 2):
self.logError(_("No move specified."))
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
if not self.p.online:
self.logError(_("Printer is not online. Unable to move."))
return
l = l.split()
if(l[0].lower() == "x"):
feed = self.settings.xy_feedrate
axis = "X"
elif(l[0].lower() == "y"):
feed = self.settings.xy_feedrate
axis = "Y"
elif(l[0].lower() == "z"):
feed = self.settings.z_feedrate
axis = "Z"
elif(l[0].lower() == "e"):
feed = self.settings.e_feedrate
axis = "E"
else:
self.logError(_("Unknown axis."))
return
try:
float(l[1]) # check if distance can be a float
except:
self.logError(_("Invalid distance"))
return
try:
feed = int(l[2])
except:
pass
self.p.send_now("G91")
self.p.send_now("G1 " + axis + str(l[1]) + " F" + str(feed))
self.p.send_now("G90")
def help_move(self):
self.log(_("Move an axis. Specify the name of the axis and the amount. "))
self.log(_("move X 10 will move the X axis forward by 10mm at %s mm/min (default XY speed)") % self.settings.xy_feedrate)
self.log(_("move Y 10 5000 will move the Y axis forward by 10mm at 5000mm/min"))
self.log(_("move Z -1 will move the Z axis down by 1mm at %s mm/min (default Z speed)") % self.settings.z_feedrate)
self.log(_("Common amounts are in the tabcomplete list."))
def complete_move(self, text, line, begidx, endidx):
if (len(line.split()) == 2 and line[-1] != " ") or (len(line.split()) == 1 and line[-1] == " "):
return [i for i in ["X ", "Y ", "Z ", "E "] if i.lower().startswith(text)]
elif(len(line.split()) == 3 or (len(line.split()) == 2 and line[-1] == " ")):
base = line.split()[-1]
rlen = 0
if base.startswith("-"):
rlen = 1
if line[-1] == " ":
base = ""
return [i[rlen:] for i in ["-100", "-10", "-1", "-0.1", "100", "10", "1", "0.1", "-50", "-5", "-0.5", "50", "5", "0.5", "-200", "-20", "-2", "-0.2", "200", "20", "2", "0.2"] if i.startswith(base)]
else:
return []
def do_extrude(self, l, override = None, overridefeed = 300):
length = 5 # default extrusion length
feed = self.settings.e_feedrate # default speed
if not self.p.online:
self.logError("Printer is not online. Unable to extrude.")
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
ls = l.split()
if len(ls):
try:
length = float(ls[0])
except:
self.logError(_("Invalid length given."))
if len(ls) > 1:
try:
feed = int(ls[1])
except:
self.logError(_("Invalid speed given."))
if override is not None:
length = override
feed = overridefeed
if length > 0:
self.log(_("Extruding %fmm of filament.") % (length,))
elif length < 0:
self.log(_("Reversing %fmm of filament.") % (-length,))
else:
self.log(_("Length is 0, not doing anything."))
self.p.send_now("G91")
self.p.send_now("G1 E" + str(length) + " F" + str(feed))
self.p.send_now("G90")
def help_extrude(self):
self.log(_("Extrudes a length of filament, 5mm by default, or the number of mm given as a parameter"))
self.log(_("extrude - extrudes 5mm of filament at 300mm/min (5mm/s)"))
self.log(_("extrude 20 - extrudes 20mm of filament at 300mm/min (5mm/s)"))
self.log(_("extrude -5 - REVERSES 5mm of filament at 300mm/min (5mm/s)"))
self.log(_("extrude 10 210 - extrudes 10mm of filament at 210mm/min (3.5mm/s)"))
def do_reverse(self, l):
length = 5 # default extrusion length
feed = self.settings.e_feedrate # default speed
if not self.p.online:
self.logError(_("Printer is not online. Unable to reverse."))
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
ls = l.split()
if len(ls):
try:
length = float(ls[0])
except:
self.logError(_("Invalid length given."))
if len(ls) > 1:
try:
feed = int(ls[1])
except:
self.logError(_("Invalid speed given."))
self.do_extrude("", -length, feed)
def help_reverse(self):
self.log(_("Reverses the extruder, 5mm by default, or the number of mm given as a parameter"))
self.log(_("reverse - reverses 5mm of filament at 300mm/min (5mm/s)"))
self.log(_("reverse 20 - reverses 20mm of filament at 300mm/min (5mm/s)"))
self.log(_("reverse 10 210 - extrudes 10mm of filament at 210mm/min (3.5mm/s)"))
self.log(_("reverse -5 - EXTRUDES 5mm of filament at 300mm/min (5mm/s)"))
def do_exit(self, l):
if self.status.extruder_temp_target != 0:
print "Setting extruder temp to 0"
self.p.send_now("M104 S0.0")
if self.status.bed_enabled:
            if self.status.bed_temp_target != 0:
print "Setting bed temp to 0"
self.p.send_now("M140 S0.0")
self.log("Disconnecting from printer...")
if self.p.printing:
print "Are you sure you want to exit while printing?"
print "(this will terminate the print)."
if not self.confirm():
return
self.log(_("Exiting program. Goodbye!"))
self.p.disconnect()
sys.exit()
def help_exit(self):
self.log(_("Disconnects from the printer and exits the program."))
def do_monitor(self, l):
interval = 5
if not self.p.online:
self.logError(_("Printer is not online. Please connect to it first."))
return
if not (self.p.printing or self.sdprinting):
self.logError(_("Printer is not printing. Please print something before monitoring."))
return
self.log(_("Monitoring printer, use ^C to interrupt."))
if len(l):
try:
interval = float(l)
except:
self.logError(_("Invalid period given."))
self.log(_("Updating values every %f seconds.") % (interval,))
self.monitoring = 1
prev_msg_len = 0
try:
while True:
self.p.send_now("M105")
if(self.sdprinting):
self.p.send_now("M27")
time.sleep(interval)
#print (self.tempreadings.replace("\r", "").replace("T", "Hotend").replace("B", "Bed").replace("\n", "").replace("ok ", ""))
if self.p.printing:
preface = _("Print progress: ")
progress = 100 * float(self.p.queueindex) / len(self.p.mainqueue)
elif self.sdprinting:
preface = _("Print progress: ")
progress = self.percentdone
prev_msg = preface + "%.1f%%" % progress
if self.silent is False:
sys.stdout.write("\r" + prev_msg.ljust(prev_msg_len))
sys.stdout.flush()
prev_msg_len = len(prev_msg)
except KeyboardInterrupt:
if self.silent is False: print _("Done monitoring.")
self.monitoring = 0
def help_monitor(self):
self.log(_("Monitor a machine's temperatures and an SD print's status."))
self.log(_("monitor - Reports temperature and SD print status (if SD printing) every 5 seconds"))
self.log(_("monitor 2 - Reports temperature and SD print status (if SD printing) every 2 seconds"))
def expandcommand(self, c):
return c.replace("$python", sys.executable)
def do_skein(self, l):
l = l.split()
if len(l) == 0:
self.logError(_("No file name given."))
return
settings = 0
if l[0] == "set":
settings = 1
else:
self.log(_("Skeining file: %s") % l[0])
if not(os.path.exists(l[0])):
self.logError(_("File not found!"))
return
try:
if settings:
param = self.expandcommand(self.settings.sliceoptscommand).replace("\\", "\\\\").encode()
self.log(_("Entering slicer settings: %s") % param)
subprocess.call(shlex.split(param))
else:
param = self.expandcommand(self.settings.slicecommand).encode()
                self.log(_("Slicing: %s") % param)
params = [i.replace("$s", l[0]).replace("$o", l[0].replace(".stl", "_export.gcode").replace(".STL", "_export.gcode")).encode() for i in shlex.split(param.replace("\\", "\\\\").encode())]
subprocess.call(params)
self.log(_("Loading sliced file."))
self.do_load(l[0].replace(".stl", "_export.gcode"))
except Exception, e:
self.logError(_("Slicing failed: %s") % e)
def complete_skein(self, text, line, begidx, endidx):
s = line.split()
if len(s) > 2:
return []
if (len(s) == 1 and line[-1] == " ") or (len(s) == 2 and line[-1] != " "):
if len(s) > 1:
return [i[len(s[1]) - len(text):] for i in glob.glob(s[1] + "*/") + glob.glob(s[1] + "*.stl")]
else:
return glob.glob("*/") + glob.glob("*.stl")
def help_skein(self):
self.log(_("Creates a gcode file from an stl model using the slicer (with tab-completion)"))
self.log(_("skein filename.stl - create gcode file"))
self.log(_("skein filename.stl view - create gcode file and view using skeiniso"))
self.log(_("skein set - adjust slicer settings"))
def do_home(self, l):
if not self.p.online:
self.logError(_("Printer is not online. Unable to move."))
return
if self.p.printing:
self.logError(_("Printer is currently printing. Please pause the print before you issue manual commands."))
return
if "x" in l.lower():
self.p.send_now("G28 X0")
if "y" in l.lower():
self.p.send_now("G28 Y0")
if "z" in l.lower():
self.p.send_now("G28 Z0")
if "e" in l.lower():
self.p.send_now("G92 E0")
if not len(l):
self.p.send_now("G28")
self.p.send_now("G92 E0")
def help_home(self):
self.log(_("Homes the printer"))
        self.log(_("home - homes all axes and zeroes the extruder (Using G28 and G92)"))
self.log(_("home xy - homes x and y axes (Using G28)"))
self.log(_("home z - homes z axis only (Using G28)"))
self.log(_("home e - set extruder position to zero (Using G92)"))
self.log(_("home xyze - homes all axes and zeroes the extruder (Using G28 and G92)"))
def do_off(self, l):
self.off()
def off(self, ignore = None):
if self.p.online:
if self.p.printing: self.pause(None)
self.log(_("; Motors off"))
self.onecmd("M84")
self.log(_("; Extruder off"))
self.onecmd("M104 S0")
self.log(_("; Heatbed off"))
self.onecmd("M140 S0")
self.log(_("; Fan off"))
self.onecmd("M107")
self.log(_("; Power supply off"))
self.onecmd("M81")
else:
self.logError(_("Printer is not online. Unable to turn it off."))
def help_off(self):
self.log(_("Turns off everything on the printer"))
def add_cmdline_arguments(self, parser):
parser.add_argument('-c', '--conf', '--config', help = _("load this file on startup instead of .pronsolerc ; you may chain config files, if so settings auto-save will use the last specified file"), action = "append", default = [])
parser.add_argument('-e', '--execute', help = _("executes command after configuration/.pronsolerc is loaded ; macros/settings from these commands are not autosaved"), action = "append", default = [])
parser.add_argument('filename', nargs='?', help = _("file to load"))
def process_cmdline_arguments(self, args):
for config in args.conf:
self.load_rc(config)
if not self.rc_loaded:
self.load_default_rc()
self.processing_args = True
for command in args.execute:
self.onecmd(command)
self.processing_args = False
if args.filename:
filename = args.filename.decode(locale.getpreferredencoding())
self.cmdline_filename_callback(filename)
def cmdline_filename_callback(self, filename):
self.do_load(filename)
def parse_cmdline(self, args):
parser = argparse.ArgumentParser(description = 'Printrun 3D printer interface')
self.add_cmdline_arguments(parser)
args = [arg for arg in args if not arg.startswith("-psn")]
args = parser.parse_args(args = args)
self.process_cmdline_arguments(args)
    # We replace this function, defined in cmd.py.
    # Its default behavior with regards to Ctrl-C
    # and Ctrl-D doesn't make much sense...
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
"""
self.preloop()
if self.use_rawinput and self.completekey:
try:
import readline
self.old_completer = readline.get_completer()
readline.set_completer(self.complete)
readline.parse_and_bind(self.completekey + ": complete")
except ImportError:
pass
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro) + "\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
if self.use_rawinput:
try:
line = raw_input(self.prompt)
except EOFError:
print ""
self.do_exit("")
except KeyboardInterrupt:
print ""
line = ""
else:
self.stdout.write(self.prompt)
self.stdout.flush()
line = self.stdin.readline()
if not len(line):
line = ""
else:
line = line.rstrip('\r\n')
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
finally:
if self.use_rawinput and self.completekey:
try:
import readline
readline.set_completer(self.old_completer)
except ImportError:
pass
if __name__ == "__main__":
interp = pronsole()
interp.parse_cmdline(sys.argv[1:])
try:
interp.cmdloop()
except SystemExit:
interp.p.disconnect()
except:
print _("Caught an exception, exiting:")
traceback.print_exc()
interp.p.disconnect()
|
pintubigfoot/pinturun
|
pronsole.py
|
Python
|
gpl-3.0
| 61,093
|
[
"Firefly"
] |
83e21a2bf9f6f516dd50f43eb9a23d44f6b57abdfa841c444263c3fa33bdd1b4
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from pymatgen.core.structure import Structure
from pymatgen.io.atat import Mcsqs
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(PymatgenTest.TEST_FILES_DIR, "mcsqs")
class AtatTest(PymatgenTest):
def test_mcsqs_import(self):
test_string = """1.000000 0.000000 0.000000
0.000000 1.000000 0.000000
0.000000 0.000000 1.000000
0.000000 -1.000000 -2.000000
2.000000 -1.000000 0.000000
-1.000000 -1.000000 1.000000
0.000000 -2.000000 -1.000000 Mn
1.000000 -2.000000 -1.000000 Mn
0.000000 -1.000000 -1.000000 Mn
-0.000000 -2.000000 0.000000 Mn
1.000000 -2.000000 0.000000 Mn
0.000000 -1.000000 0.000000 Mn
1.000000 -1.000000 0.000000 Fe
1.000000 -3.000000 -1.000000 Mn
0.500000 -1.500000 -0.500000 Sr
1.500000 -1.500000 -0.500000 Ca
-0.500000 -1.500000 0.500000 Ca
0.500000 -1.500000 0.500000 Ca
1.500000 -2.500000 -1.500000 Ca
0.500000 -1.500000 -1.500000 Sr
0.500000 -2.500000 -0.500000 Sr
-0.500000 -1.500000 -0.500000 Ca
0.000000 -1.500000 -1.000000 O
1.000000 -1.500000 -1.000000 O
1.000000 -2.500000 0.000000 O
-0.000000 -1.500000 0.000000 O
1.000000 -1.500000 0.000000 O
0.000000 -0.500000 0.000000 O
0.000000 -2.500000 -1.000000 O
1.000000 -2.500000 -1.000000 O
0.500000 -2.000000 -1.000000 O
1.500000 -2.000000 -1.000000 O
0.500000 -1.000000 -1.000000 O
0.500000 -2.000000 0.000000 O
-0.500000 -1.000000 0.000000 O
0.500000 -1.000000 0.000000 O
1.500000 -1.000000 0.000000 O
-0.500000 -2.000000 -1.000000 O
0.000000 -2.000000 -0.500000 O
1.000000 -2.000000 -0.500000 O
0.000000 -1.000000 -0.500000 O
1.000000 -1.000000 -0.500000 O
1.000000 -2.000000 0.500000 O
0.000000 -1.000000 0.500000 O
1.000000 -2.000000 -1.500000 O
0.000000 -1.000000 -1.500000 O
"""
s = Mcsqs.structure_from_string(test_string)
self.assertEqual(s.composition.formula, "Sr3 Ca5 Mn7 Fe1 O24")
self.assertAlmostEqual(s.lattice.a, 2.2360679775)
self.assertAlmostEqual(s.lattice.b, 2.2360679775)
self.assertAlmostEqual(s.lattice.c, 1.73205080757)
def test_mcsqs_export(self):
s = self.get_structure("SrTiO3")
s.replace_species({"Sr2+": {"Sr2+": 0.5, "Ca2+": 0.5}})
ref_string = """3.905000 0.000000 0.000000
-0.000000 3.905000 0.000000
0.000000 0.000000 3.905000
1.0 0.0 0.0
0.0 1.0 0.0
0.0 0.0 1.0
0.500000 0.500000 0.500000 Sr2+=0.5,Ca2+=0.5
0.000000 0.000000 0.000000 Ti4+=1.0
0.000000 0.000000 0.500000 O2-=1.0
0.000000 0.500000 0.000000 O2-=1.0
0.500000 0.000000 0.000000 O2-=1.0"""
self.assertEqual(Mcsqs(s).to_string(), ref_string)
def test_mcsqs_cif_nacl(self):
# cif file from str2cif (utility distributed with atat)
struc_from_cif = Structure.from_file(os.path.join(test_dir, "bestsqs_nacl.cif"))
# output file directly from mcsqs
struc_from_out = Structure.from_file(os.path.join(test_dir, "bestsqs_nacl.out"))
self.assertTrue(struc_from_cif.matches(struc_from_out))
self.assertArrayAlmostEqual(
struc_from_out.lattice.parameters,
struc_from_cif.lattice.parameters,
decimal=4,
)
def test_mcsqs_cif_pzt(self):
# cif file from str2cif (utility distributed with atat)
struc_from_cif = Structure.from_file(os.path.join(test_dir, "bestsqs_pzt.cif"))
# output file directly from mcsqs
struc_from_out = Structure.from_file(os.path.join(test_dir, "bestsqs_pzt.out"))
self.assertTrue(struc_from_cif.matches(struc_from_out))
self.assertArrayAlmostEqual(
struc_from_out.lattice.parameters,
struc_from_cif.lattice.parameters,
decimal=4,
)
|
materialsproject/pymatgen
|
pymatgen/io/tests/test_atat.py
|
Python
|
mit
| 3,733
|
[
"pymatgen"
] |
d960c73cd098d43ad9fd1fbc229086a293bda5c4a893b80fc2448dde768c8690
|
""" This tests only need the JobDB, and connects directly to it
"""
import unittest
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC import gLogger
from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB
jdl = """
[
Origin = "DIRAC";
Executable = "$DIRACROOT/scripts/dirac-jobexec";
StdError = "std.err";
LogLevel = "info";
Site = "ANY";
JobName = "helloWorld";
Priority = "1";
InputSandbox =
{
"../../Integration/WorkloadManagementSystem/exe-script.py",
"exe-script.py",
"/tmp/tmpMQEink/jobDescription.xml",
"SB:FedericoSandboxSE|/SandBox/f/fstagni.lhcb_user/0c2/9f5/0c29f53a47d051742346b744c793d4d0.tar.bz2"
};
Arguments = "jobDescription.xml -o LogLevel=info";
JobGroup = "lhcb";
OutputSandbox =
{
"helloWorld.log",
"std.err",
"std.out"
};
StdOutput = "std.out";
InputData = "";
JobType = "User";
]
"""
class JobDBTestCase( unittest.TestCase ):
""" Base class for the JobDB test cases
"""
def setUp( self ):
gLogger.setLevel( 'DEBUG' )
self.jobDB = JobDB()
def tearDown( self ):
result = self.jobDB.selectJobs( {} )
self.assertTrue( result['OK'], 'Status after selectJobs' )
jobs = result['Value']
for job in jobs:
result = self.jobDB.removeJobFromDB( job )
self.assertTrue(result['OK'])
class JobSubmissionCase( JobDBTestCase ):
""" TestJobDB represents a test suite for the JobDB database front-end
"""
def test_insertAndRemoveJobIntoDB( self ):
res = self.jobDB.insertNewJobIntoDB( jdl, 'owner', '/DN/OF/owner', 'ownerGroup', 'someSetup' )
self.assertTrue(res['OK'])
jobID = res['JobID']
res = self.jobDB.getJobAttribute( jobID, 'Status' )
self.assertTrue(res['OK'])
self.assertEqual( res['Value'], 'Received' )
res = self.jobDB.getJobAttribute( jobID, 'MinorStatus' )
self.assertTrue(res['OK'])
self.assertEqual( res['Value'], 'Job accepted' )
res = self.jobDB.getJobOptParameters( jobID )
self.assertTrue(res['OK'])
self.assertEqual( res['Value'], {} )
class JobRescheduleCase(JobDBTestCase):
def test_rescheduleJob(self):
res = self.jobDB.insertNewJobIntoDB( jdl, 'owner', '/DN/OF/owner', 'ownerGroup', 'someSetup' )
self.assertTrue(res['OK'])
jobID = res['JobID']
result = self.jobDB.rescheduleJob(jobID)
self.assertTrue(result['OK'])
res = self.jobDB.getJobAttribute( jobID, 'Status' )
self.assertTrue(res['OK'])
self.assertEqual( res['Value'], 'Received' )
result = self.jobDB.getJobAttribute( jobID, 'MinorStatus' )
self.assertTrue(result['OK'])
self.assertEqual( result['Value'], 'Job Rescheduled' )
class CountJobsCase(JobDBTestCase):
def test_getCounters(self):
result = self.jobDB.getCounters( 'Jobs', ['Status', 'MinorStatus'], {}, '2007-04-22 00:00:00' )
self.assertTrue( result['OK'],'Status after getCounters')
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(JobSubmissionCase)
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( JobRescheduleCase ) )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( CountJobsCase ) )
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
|
Andrew-McNab-UK/DIRAC
|
tests/Integration/WorkloadManagementSystem/Test_JobDB.py
|
Python
|
gpl-3.0
| 3,391
|
[
"DIRAC"
] |
4caf2650c8474cd6e57520efdff8f4a6c93e47fe39c57e5595df427eecdf946e
|
# -*- coding: utf-8 -*-
"""
.. _tut-artifact-ica:
Repairing artifacts with ICA
============================
This tutorial covers the basics of independent components analysis (ICA) and
shows how ICA can be used for artifact repair; an extended example illustrates
repair of ocular and heartbeat artifacts.
.. contents:: Page contents
:local:
:depth: 2
We begin as always by importing the necessary Python modules and loading some
:ref:`example data <sample-dataset>`. Because ICA can be computationally
intense, we'll also crop the data to 60 seconds; and to save ourselves from
repeatedly typing ``mne.preprocessing`` we'll directly import a few functions
and classes from that submodule:
"""
import os
import mne
from mne.preprocessing import (ICA, create_eog_epochs, create_ecg_epochs,
corrmap)
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
raw.crop(tmax=60.)
###############################################################################
# .. note::
# Before applying ICA (or any artifact repair strategy), be sure to observe
# the artifacts in your data to make sure you choose the right repair tool.
# Sometimes the right tool is no tool at all — if the artifacts are small
# enough you may not even need to repair them to get good analysis results.
# See :ref:`tut-artifact-overview` for guidance on detecting and
# visualizing various types of artifact.
#
# What is ICA?
# ^^^^^^^^^^^^
#
# Independent components analysis (ICA) is a technique for estimating
# independent source signals from a set of recordings in which the source
# signals were mixed together in unknown ratios. A common example of this is
# the problem of `blind source separation`_: with 3 musical instruments playing
# in the same room, and 3 microphones recording the performance (each picking
# up all 3 instruments, but at varying levels), can you somehow "unmix" the
# signals recorded by the 3 microphones so that you end up with a separate
# "recording" isolating the sound of each instrument?
#
# It is not hard to see how this analogy applies to EEG/MEG analysis: there are
# many "microphones" (sensor channels) simultaneously recording many
# "instruments" (blinks, heartbeats, activity in different areas of the brain,
# muscular activity from jaw clenching or swallowing, etc). As long as these
# various source signals are `statistically independent`_ and non-gaussian, it
# is usually possible to separate the sources using ICA, and then re-construct
# the sensor signals after excluding the sources that are unwanted.
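#
# As a toy numerical illustration of blind source separation (a minimal
# sketch, *not* part of the pipeline below; it assumes ``scikit-learn`` is
# installed, and the signals and mixing matrix are made up):
#
# .. code-block:: python
#
#     import numpy as np
#     from sklearn.decomposition import FastICA
#
#     t = np.linspace(0, 8, 2000)
#     sources = np.c_[np.sin(2 * t), np.sign(np.sin(3 * t))]  # 2 "instruments"
#     mixing = np.array([[1.0, 0.5], [0.5, 1.0]])  # unknown in practice
#     recordings = sources @ mixing.T              # 2 "microphones"
#     unmixed = FastICA(n_components=2, random_state=0).fit_transform(recordings)
#     # ``unmixed`` recovers the sources up to scaling, sign, and ordering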
#
#
# ICA in MNE-Python
# ~~~~~~~~~~~~~~~~~
#
# .. sidebar:: ICA and dimensionality reduction
#
# If you want to perform ICA with *no* dimensionality reduction (other than
# the number of Independent Components (ICs) given in ``n_components``, and
# any subsequent exclusion of ICs you specify in ``ICA.exclude``), pass
# ``max_pca_components=None`` and ``n_pca_components=None`` (these are the
# default values).
#
# However, if you *do* want to reduce dimensionality, consider this
# example: if you have 300 sensor channels and you set
# ``max_pca_components=200``, ``n_components=50`` and
# ``n_pca_components=None``, then the PCA step yields 200 PCs, the first 50
# PCs are sent to the ICA algorithm (yielding 50 ICs), and during
# reconstruction :meth:`~mne.preprocessing.ICA.apply` will use the 50 ICs
# plus PCs number 51-200 (the full PCA residual). If instead you specify
# ``n_pca_components=120`` then :meth:`~mne.preprocessing.ICA.apply` will
# reconstruct using the 50 ICs plus the first 70 PCs in the PCA residual
# (numbers 51-120).
#
# **If you have previously been using EEGLAB**'s ``runica()`` and are
# looking for the equivalent of its ``'pca', n`` option to reduce
# dimensionality via PCA before the ICA step, set ``max_pca_components=n``,
# while leaving ``n_components`` and ``n_pca_components`` at their default
# (i.e., ``None``).
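#
#    As a concrete (hypothetical) configuration, the 300-channel scenario
#    described above would be written as:
#
#    .. code-block:: python
#
#        ica = ICA(max_pca_components=200, n_components=50,
#                  n_pca_components=None)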
#
# MNE-Python implements three different ICA algorithms: ``fastica`` (the
# default), ``picard``, and ``infomax``. FastICA and Infomax are both in fairly
# widespread use; Picard is a newer (2017) algorithm that is expected to
# converge faster than FastICA and Infomax, and is more robust than other
# algorithms in cases where the sources are not completely independent, which
# typically happens with real EEG/MEG data. See [1]_ for more information.
#
# The ICA interface in MNE-Python is similar to the interface in
# `scikit-learn`_: some general parameters are specified when creating an
# :class:`~mne.preprocessing.ICA` object, then the
# :class:`~mne.preprocessing.ICA` object is fit to the data using its
# :meth:`~mne.preprocessing.ICA.fit` method. The results of the fitting are
# added to the :class:`~mne.preprocessing.ICA` object as attributes that end in
# an underscore (``_``), such as ``ica.mixing_matrix_`` and
# ``ica.unmixing_matrix_``. After fitting, the ICA component(s) that you want
# to remove must be chosen, and the ICA fit must then be applied to the
# :class:`~mne.io.Raw` or :class:`~mne.Epochs` object using the
# :class:`~mne.preprocessing.ICA` object's :meth:`~mne.preprocessing.ICA.apply`
# method.
#
# As is typically done with ICA, the data are first scaled to unit variance and
# whitened using principal components analysis (PCA) before performing the ICA
# decomposition. You can impose an optional dimensionality reduction at this
# step by specifying ``max_pca_components``. From the retained Principal
# Components (PCs), the first ``n_components`` are then passed to the ICA
# algorithm (``n_components`` may be an integer number of components to use, or
# a fraction of the explained variance that the retained components should
# capture).
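#
# For example (an illustrative value, not used below), passing a float
# requests however many components are needed to capture that fraction of
# the explained variance:
#
# .. code-block:: python
#
#     ica = ICA(n_components=0.95)  # keep 95% of the explained variance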
#
# After visualizing the Independent Components (ICs) and excluding any that
# capture artifacts you want to repair, the sensor signal can be reconstructed
# using the :class:`~mne.preprocessing.ICA` object's
# :meth:`~mne.preprocessing.ICA.apply` method. By default, signal
# reconstruction uses all of the ICs (less any ICs listed in ``ICA.exclude``)
# plus all of the PCs that were not included in the ICA decomposition (i.e.,
# the "PCA residual"). If you want to reduce the number of components used at
# the reconstruction stage, it is controlled by the ``n_pca_components``
# parameter (which will in turn reduce the rank of your data; by default
# ``n_pca_components = max_pca_components`` resulting in no additional
# dimensionality reduction). The fitting and reconstruction procedures and the
# parameters that control dimensionality at various stages are summarized in
# the diagram below:
#
# .. graphviz:: ../../_static/diagrams/ica.dot
# :alt: Diagram of ICA procedure in MNE-Python
# :align: left
#
# See the Notes section of the :class:`~mne.preprocessing.ICA` documentation
# for further details. Next we'll walk through an extended example that
# illustrates each of these steps in greater detail.
#
# Example: EOG and ECG artifact repair
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Visualizing the artifacts
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let's begin by visualizing the artifacts that we want to repair. In this
# dataset they are big enough to see easily in the raw data:
# pick some channels that clearly show heartbeats and blinks
regexp = r'(MEG [12][45][123]1|EEG 00.)'
artifact_picks = mne.pick_channels_regexp(raw.ch_names, regexp=regexp)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks))
###############################################################################
# We can get a summary of how the ocular artifact manifests across each channel
# type using :func:`~mne.preprocessing.create_eog_epochs` like we did in the
# :ref:`tut-artifact-overview` tutorial:
eog_evoked = create_eog_epochs(raw).average()
eog_evoked.apply_baseline(baseline=(None, -0.2))
eog_evoked.plot_joint()
###############################################################################
# Now we'll do the same for the heartbeat artifacts, using
# :func:`~mne.preprocessing.create_ecg_epochs`:
ecg_evoked = create_ecg_epochs(raw).average()
ecg_evoked.apply_baseline(baseline=(None, -0.2))
ecg_evoked.plot_joint()
###############################################################################
# Filtering to remove slow drifts
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Before we run the ICA, an important step is filtering the data to remove
# low-frequency drifts, which can negatively affect the quality of the ICA fit.
# The slow drifts are problematic because they reduce the independence of the
# assumed-to-be-independent sources (e.g., during a slow upward drift, the
# neural, heartbeat, blink, and other muscular sources will all tend to have
# higher values), making it harder for the algorithm to find an accurate
# solution. A high-pass filter with 1 Hz cutoff frequency is recommended.
# However, because filtering is a linear operation, the ICA solution found from
# the filtered signal can be applied to the unfiltered signal (see [2]_ for
# more information), so we'll keep a copy of the unfiltered
# :class:`~mne.io.Raw` object around so we can apply the ICA solution to it
# later.
filt_raw = raw.copy()
filt_raw.load_data().filter(l_freq=1., h_freq=None)
###############################################################################
# Fitting and plotting the ICA solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. sidebar:: Ignoring the time domain
#
# The ICA algorithms implemented in MNE-Python find patterns across
# channels, but ignore the time domain. This means you can compute ICA on
# discontinuous :class:`~mne.Epochs` or :class:`~mne.Evoked` objects (not
# just continuous :class:`~mne.io.Raw` objects), or only use every Nth
# sample by passing the ``decim`` parameter to ``ICA.fit()``.
#
# Now we're ready to set up and fit the ICA. Since we know (from observing our
# raw data) that the EOG and ECG artifacts are fairly strong, we would expect
# those artifacts to be captured in the first few dimensions of the PCA
# decomposition that happens before the ICA. Therefore, we probably don't need
# a huge number of components to do a good job of isolating our artifacts
# (though it is usually preferable to include more components for a more
# accurate solution). As a first guess, we'll run ICA with ``n_components=15``
# (use only the first 15 PCA components to compute the ICA decomposition) — a
# very small number given that our data has over 300 channels, but with the
# advantage that it will run quickly and we will be able to tell easily whether
# it worked or not (because we already know what the EOG / ECG artifacts should
# look like).
#
#
# ICA fitting is not deterministic (e.g., the components may get a sign
# flip on different runs, or may not always be returned in the same order), so
# we'll also specify a `random seed`_ so that we get identical results each
# time this tutorial is built by our web servers.
ica = ICA(n_components=15, random_state=97)
ica.fit(filt_raw)
###############################################################################
# Some optional parameters that we could have passed to the
# :meth:`~mne.preprocessing.ICA.fit` method include ``decim`` (to use only
# every Nth sample in computing the ICs, which can yield a considerable
# speed-up) and ``reject`` (for providing a rejection dictionary for maximum
# acceptable peak-to-peak amplitudes for each channel type, just like we used
# when creating epoched data in the :ref:`tut-overview` tutorial).
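#
# For instance (hypothetical threshold value, shown only for illustration):
#
# .. code-block:: python
#
#     ica.fit(filt_raw, decim=3, reject=dict(mag=4e-12))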
#
# Now we can examine the ICs to see what they captured.
# :meth:`~mne.preprocessing.ICA.plot_sources` will show the time series of the
# ICs. Note that in our call to :meth:`~mne.preprocessing.ICA.plot_sources` we
# can use the original, unfiltered :class:`~mne.io.Raw` object:
raw.load_data()
ica.plot_sources(raw)
###############################################################################
# Here we can pretty clearly see that the first component (``ICA000``) captures
# the EOG signal quite well, and the second component (``ICA001``) looks a lot
# like `a heartbeat <qrs_>`_ (for more info on visually identifying Independent
# Components, `this EEGLAB tutorial`_ is a good resource). We can also
# visualize the scalp field distribution of each component using
# :meth:`~mne.preprocessing.ICA.plot_components`. These are interpolated based
# on the values in the ICA unmixing matrix:
# sphinx_gallery_thumbnail_number = 9
ica.plot_components()
###############################################################################
# .. note::
#
# :meth:`~mne.preprocessing.ICA.plot_components` (which plots the scalp
# field topographies for each component) has an optional ``inst`` parameter
# that takes an instance of :class:`~mne.io.Raw` or :class:`~mne.Epochs`.
# Passing ``inst`` makes the scalp topographies interactive: clicking one
# will bring up a diagnostic :meth:`~mne.preprocessing.ICA.plot_properties`
# window (see below) for that component.
#
# In the plots above it's fairly obvious which ICs are capturing our EOG and
# ECG artifacts, but there are additional ways to visualize them anyway just to
# be sure. First, we can plot an overlay of the original signal against the
# reconstructed signal with the artifactual ICs excluded, using
# :meth:`~mne.preprocessing.ICA.plot_overlay`:
# blinks
ica.plot_overlay(raw, exclude=[0], picks='eeg')
# heartbeats
ica.plot_overlay(raw, exclude=[1], picks='mag')
###############################################################################
# We can also plot some diagnostics of each IC using
# :meth:`~mne.preprocessing.ICA.plot_properties`:
ica.plot_properties(raw, picks=[0, 1])
###############################################################################
# In the remaining sections, we'll look at different ways of choosing which ICs
# to exclude prior to reconstructing the sensor signals.
#
#
# Selecting ICA components manually
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Once we're certain which components we want to exclude, we can specify that
# manually by setting the ``ica.exclude`` attribute. Similar to marking bad
# channels, merely setting ``ica.exclude`` doesn't do anything immediately (it
# just adds the excluded ICs to a list that will get used later when it's
# needed). Once the exclusions have been set, ICA methods like
# :meth:`~mne.preprocessing.ICA.plot_overlay` will exclude those component(s)
# even if no ``exclude`` parameter is passed, and the list of excluded
# components will be preserved when using :meth:`mne.preprocessing.ICA.save`
# and :func:`mne.preprocessing.read_ica`.
ica.exclude = [0, 1] # indices chosen based on various plots above
###############################################################################
# Now that the exclusions have been set, we can reconstruct the sensor signals
# with artifacts removed using the :meth:`~mne.preprocessing.ICA.apply` method
# (remember, we're applying the ICA solution from the *filtered* data to the
# original *unfiltered* signal). Plotting the original raw data alongside the
# reconstructed data shows that the heartbeat and blink artifacts are repaired.
# ica.apply() changes the Raw object in-place, so let's make a copy first:
reconst_raw = raw.copy()
ica.apply(reconst_raw)
raw.plot(order=artifact_picks, n_channels=len(artifact_picks))
reconst_raw.plot(order=artifact_picks, n_channels=len(artifact_picks))
del reconst_raw
###############################################################################
# Using an EOG channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# It may have seemed easy to review the plots and manually select which ICs to
# exclude, but when processing dozens or hundreds of subjects this can become
# a tedious, rate-limiting step in the analysis pipeline. One alternative is to
# use dedicated EOG or ECG sensors as a "pattern" to check the ICs against, and
# automatically mark for exclusion any ICs that match the EOG/ECG pattern. Here
# we'll use :meth:`~mne.preprocessing.ICA.find_bads_eog` to automatically find
# the ICs that best match the EOG signal, then use
# :meth:`~mne.preprocessing.ICA.plot_scores` along with our other plotting
# functions to see which ICs it picked. We'll start by resetting
# ``ica.exclude`` back to an empty list:
ica.exclude = []
# find which ICs match the EOG pattern
eog_indices, eog_scores = ica.find_bads_eog(raw)
ica.exclude = eog_indices
# barplot of ICA component "EOG match" scores
ica.plot_scores(eog_scores)
# plot diagnostics
ica.plot_properties(raw, picks=eog_indices)
# plot ICs applied to raw data, with EOG matches highlighted
ica.plot_sources(raw)
# plot ICs applied to the averaged EOG epochs, with EOG matches highlighted
ica.plot_sources(eog_evoked)
###############################################################################
# Note that above we used :meth:`~mne.preprocessing.ICA.plot_sources` on both
# the original :class:`~mne.io.Raw` instance and also on an
# :class:`~mne.Evoked` instance of the extracted EOG artifacts. This can be
# another way to confirm that :meth:`~mne.preprocessing.ICA.find_bads_eog` has
# identified the correct components.
#
#
# Using a simulated channel to select ICA components
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you don't have an EOG channel,
# :meth:`~mne.preprocessing.ICA.find_bads_eog` has a ``ch_name`` parameter that
# you can use as a proxy for EOG. You can use a single channel, or create a
# bipolar reference from frontal EEG sensors and use that as virtual EOG
# channel. This carries a risk however: you must hope that the frontal EEG
# channels only reflect EOG and not brain dynamics in the prefrontal cortex (or
# you must not care about those prefrontal signals).
#
# For ECG, it is easier: :meth:`~mne.preprocessing.ICA.find_bads_ecg` can use
# cross-channel averaging of magnetometer or gradiometer channels to construct
# a virtual ECG channel, so if you have MEG channels it is usually not
# necessary to pass a specific channel name.
# :meth:`~mne.preprocessing.ICA.find_bads_ecg` also has two options for its
# ``method`` parameter: ``'ctps'`` (cross-trial phase statistics [3]_) and
# ``'correlation'`` (Pearson correlation between data and ECG channel).
ica.exclude = []
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = ica.find_bads_ecg(raw, method='correlation')
ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
ica.plot_scores(ecg_scores)
# plot diagnostics
ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
ica.plot_sources(raw)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
ica.plot_sources(ecg_evoked)
###############################################################################
# The last of these plots is especially useful: it shows us that the heartbeat
# artifact is coming through on *two* ICs, and we've only caught one of them.
# In fact, if we look closely at the output of
# :meth:`~mne.preprocessing.ICA.plot_sources` (online, you can right-click →
# "view image" to zoom in), it looks like ``ICA014`` has a weak periodic
# component that is in-phase with ``ICA001``. It might be worthwhile to re-run
# the ICA with more components to see if that second heartbeat artifact
# resolves out a little better:
# refit the ICA with 30 components this time
new_ica = ICA(n_components=30, random_state=97)
new_ica.fit(filt_raw)
# find which ICs match the ECG pattern
ecg_indices, ecg_scores = new_ica.find_bads_ecg(raw, method='correlation')
new_ica.exclude = ecg_indices
# barplot of ICA component "ECG match" scores
new_ica.plot_scores(ecg_scores)
# plot diagnostics
new_ica.plot_properties(raw, picks=ecg_indices)
# plot ICs applied to raw data, with ECG matches highlighted
new_ica.plot_sources(raw)
# plot ICs applied to the averaged ECG epochs, with ECG matches highlighted
new_ica.plot_sources(ecg_evoked)
###############################################################################
# Much better! Now we've captured both ICs that are reflecting the heartbeat
# artifact (and as a result, we got two diagnostic plots: one for each IC that
# reflects the heartbeat). This demonstrates the value of checking the results
# of automated approaches like :meth:`~mne.preprocessing.ICA.find_bads_ecg`
# before accepting them.
# clean up memory before moving on
del raw, filt_raw, ica, new_ica
###############################################################################
# Selecting ICA components using template matching
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# When dealing with multiple subjects, it is also possible to manually select
# an IC for exclusion on one subject, and then use that component as a
# *template* for selecting which ICs to exclude from other subjects' data,
# using :func:`mne.preprocessing.corrmap` [4]_. The idea behind
# :func:`~mne.preprocessing.corrmap` is that the artifact patterns are similar
# enough across subjects that corresponding ICs can be identified by
# correlating the ICs from each ICA solution with a common template, and
# picking the ICs with the highest correlation strength.
# :func:`~mne.preprocessing.corrmap` takes a list of ICA solutions, and a
# ``template`` parameter that specifies which ICA object and which component
# within it to use as a template.
#
# Since our sample dataset only contains data from one subject, we'll use a
# different dataset with multiple subjects: the EEGBCI dataset [5]_ [6]_. The
# dataset has 109 subjects; we'll just download one run (a left/right hand
# movement task) from each of the first 4 subjects:
mapping = {
'Fc5.': 'FC5', 'Fc3.': 'FC3', 'Fc1.': 'FC1', 'Fcz.': 'FCz', 'Fc2.': 'FC2',
'Fc4.': 'FC4', 'Fc6.': 'FC6', 'C5..': 'C5', 'C3..': 'C3', 'C1..': 'C1',
'Cz..': 'Cz', 'C2..': 'C2', 'C4..': 'C4', 'C6..': 'C6', 'Cp5.': 'CP5',
'Cp3.': 'CP3', 'Cp1.': 'CP1', 'Cpz.': 'CPz', 'Cp2.': 'CP2', 'Cp4.': 'CP4',
'Cp6.': 'CP6', 'Fp1.': 'Fp1', 'Fpz.': 'Fpz', 'Fp2.': 'Fp2', 'Af7.': 'AF7',
'Af3.': 'AF3', 'Afz.': 'AFz', 'Af4.': 'AF4', 'Af8.': 'AF8', 'F7..': 'F7',
'F5..': 'F5', 'F3..': 'F3', 'F1..': 'F1', 'Fz..': 'Fz', 'F2..': 'F2',
'F4..': 'F4', 'F6..': 'F6', 'F8..': 'F8', 'Ft7.': 'FT7', 'Ft8.': 'FT8',
'T7..': 'T7', 'T8..': 'T8', 'T9..': 'T9', 'T10.': 'T10', 'Tp7.': 'TP7',
'Tp8.': 'TP8', 'P7..': 'P7', 'P5..': 'P5', 'P3..': 'P3', 'P1..': 'P1',
'Pz..': 'Pz', 'P2..': 'P2', 'P4..': 'P4', 'P6..': 'P6', 'P8..': 'P8',
'Po7.': 'PO7', 'Po3.': 'PO3', 'Poz.': 'POz', 'Po4.': 'PO4', 'Po8.': 'PO8',
'O1..': 'O1', 'Oz..': 'Oz', 'O2..': 'O2', 'Iz..': 'Iz'
}
raws = list()
icas = list()
for subj in range(4):
# EEGBCI subjects are 1-indexed; run 3 is a left/right hand movement task
fname = mne.datasets.eegbci.load_data(subj + 1, runs=[3])[0]
raw = mne.io.read_raw_edf(fname)
# remove trailing `.` from channel names so we can set montage
raw.rename_channels(mapping)
raw.set_montage('standard_1005')
# fit ICA
ica = ICA(n_components=30, random_state=97)
ica.fit(raw)
raws.append(raw)
icas.append(ica)
###############################################################################
# Now let's run :func:`~mne.preprocessing.corrmap`:
# use the first subject as template; use Fpz as proxy for EOG
raw = raws[0]
ica = icas[0]
eog_inds, eog_scores = ica.find_bads_eog(raw, ch_name='Fpz')
corrmap(icas, template=(0, eog_inds[0]))
###############################################################################
# The first figure shows the template map, while the second figure shows all
# the maps that were considered a "match" for the template (including the
# template itself). There were only three matches from the four subjects;
# notice the output message ``No maps selected for subject(s) 1, consider a
# more liberal threshold``. By default the threshold is set automatically by
# trying several values; here it may have chosen a threshold that is too high.
# Let's take a look at the ICA sources for each subject:
for index, (ica, raw) in enumerate(zip(icas, raws)):
fig = ica.plot_sources(raw)
fig.suptitle('Subject {}'.format(index))
###############################################################################
# Notice that subject 1 *does* seem to have an IC that looks like it reflects
# blink artifacts (component ``ICA000``). Notice also that subject 3 appears to
# have *two* components that are reflecting ocular artifacts (``ICA000`` and
# ``ICA002``), but only one was caught by :func:`~mne.preprocessing.corrmap`.
# Let's try setting the threshold manually:
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9)
###############################################################################
# Now we get the message ``At least 1 IC detected for each subject`` (which is
# good). At this point we'll re-run :func:`~mne.preprocessing.corrmap` with
# parameters ``label='blink', plot=False`` to *label* the ICs from each subject
# that capture the blink artifacts (without plotting them again).
corrmap(icas, template=(0, eog_inds[0]), threshold=0.9, label='blink',
plot=False)
print([ica.labels_ for ica in icas])
###############################################################################
# Notice that the first subject has 3 different labels for the IC at index 0:
# "eog/0/Fpz", "eog", and "blink". The first two were added by
# :meth:`~mne.preprocessing.ICA.find_bads_eog`; the "blink" label was added by
# the last call to :func:`~mne.preprocessing.corrmap`. Notice also that each
# subject has at least one IC index labelled "blink", and subject 3 has two
# components (0 and 2) labelled "blink" (consistent with the plot of IC sources
# above). The ``labels_`` attribute of :class:`~mne.preprocessing.ICA` objects
# can also be manually edited to annotate the ICs with custom labels. They also
# come in handy when plotting:
icas[3].plot_components(picks=icas[3].labels_['blink'])
icas[3].exclude = icas[3].labels_['blink']
icas[3].plot_sources(raws[3])
###############################################################################
# As a final note, it is possible to extract ICs numerically using the
# :meth:`~mne.preprocessing.ICA.get_components` method of
# :class:`~mne.preprocessing.ICA` objects. This will return a :class:`NumPy
# array <numpy.ndarray>` that can be passed to
# :func:`~mne.preprocessing.corrmap` instead of the :class:`tuple` of
# ``(subject_index, component_index)`` we passed before, and will yield the
# same result:
template_eog_component = icas[0].get_components()[:, eog_inds[0]]
corrmap(icas, template=template_eog_component, threshold=0.9)
print(template_eog_component)
###############################################################################
# An advantage of using this numerical representation of an IC to capture a
# particular artifact pattern is that it can be saved and used as a template
# for future template-matching tasks using :func:`~mne.preprocessing.corrmap`
# without having to load or recompute the ICA solution that yielded the
# template originally. Put another way, when the template is a NumPy array, the
# :class:`~mne.preprocessing.ICA` object containing the template does not need
# to be in the list of ICAs provided to :func:`~mne.preprocessing.corrmap`.
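#
# A minimal sketch of that workflow (the file name is hypothetical):
#
# .. code-block:: python
#
#     import numpy as np
#
#     np.save('template_eog_component.npy', template_eog_component)
#     # ...later, possibly without the original ICA solution at hand:
#     template = np.load('template_eog_component.npy')
#     corrmap(icas, template=template, threshold=0.9)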
#
#
# References
# ^^^^^^^^^^
#
# .. [1] Ablin P, Cardoso J, Gramfort A (2018). Faster Independent Component
# Analysis by Preconditioning With Hessian Approximations. *IEEE
# Transactions on Signal Processing* 66:4040–4049.
# https://doi.org/10.1109/TSP.2018.2844203
#
# .. [2] Winkler I, Debener S, Müller K-R, Tangermann M (2015). On the
# influence of high-pass filtering on ICA-based artifact reduction in
# EEG-ERP. Proceedings of EMBC-2015, 4101–4105.
# https://doi.org/10.1109/EMBC.2015.7319296
#
# .. [3] Dammers J, Schiek M, Boers F, Silex C, Zvyagintsev M, Pietrzyk U,
# Mathiak K (2008). Integration of amplitude and phase statistics for
# complete artifact removal in independent components of neuromagnetic
# recordings. *IEEE Transactions on Biomedical Engineering*
# 55(10):2353–2362. https://doi.org/10.1109/TBME.2008.926677
#
# .. [4] Viola FC, Thorne J, Edmonds B, Schneider T, Eichele T, Debener S
# (2009). Semi-automatic identification of independent components
# representing EEG artifact. *Clinical Neurophysiology* 120(5):868–877.
# https://doi.org/10.1016/j.clinph.2009.01.015
#
# .. [5] Schalk G, McFarland DJ, Hinterberger T, Birbaumer N, Wolpaw JR (2004).
# BCI2000: A General-Purpose Brain-Computer Interface (BCI) System.
# *IEEE Transactions on Biomedical Engineering* 51(6):1034-1043.
# https://doi.org/10.1109/TBME.2004.827072
#
# .. [6] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, Mark RG,
# Mietus JE, Moody GB, Peng C-K, Stanley HE (2000). PhysioBank,
# PhysioToolkit, and PhysioNet: Components of a New Research Resource
# for Complex Physiologic Signals. *Circulation* 101(23):e215-e220.
# https://doi.org/10.1161/01.CIR.101.23.e215
#
#
# .. LINKS
#
# .. _`blind source separation`:
# https://en.wikipedia.org/wiki/Signal_separation
# .. _`statistically independent`:
# https://en.wikipedia.org/wiki/Independence_(probability_theory)
# .. _`scikit-learn`: https://scikit-learn.org
# .. _`random seed`: https://en.wikipedia.org/wiki/Random_seed
# .. _`regular expression`: https://www.regular-expressions.info/
# .. _`qrs`: https://en.wikipedia.org/wiki/QRS_complex
# .. _`this EEGLAB tutorial`: https://labeling.ucsd.edu/tutorial/labels
|
mne-tools/mne-tools.github.io
|
0.20/_downloads/7e29f93bd4df8429aa43c25d9f1ede6c/plot_40_artifact_correction_ica.py
|
Python
|
bsd-3-clause
| 29,979
|
[
"Gaussian"
] |
f14b5ac97ad444cb13e87db6f5b57c27f53d95e5588e36ac77a7775fd938a702
|
"""
Simulate the stochastic dynamics of Hopfield-like networks. We study the
problem of memory retrieval and memory navigation, that is, the exploration
of memory states by non-equilibrium dynamics. To do so we use two models: one
is the Kanter model, the other is the dynamic energy model.
"""
import numpy as np
import os
import pickle
import sys  # required by the sys.exit() calls below
class Network:
"""
This class contains the network of neurons. This includes the state of the
neurons, the patterns, the hebbian connectivity matrix, as well as the
non-equilibrium modifications: delayed neuron state and delayed weights of
the patterns. The fields are updated in agreement with the chosen model,
and the overlaps and delayed overlaps can be calculated with the
corresponding functions.
"""
def __init__(self, N = 300, seed = 100, p = 10, k = 3, mu = 0, beta = 1.,
lamda = 1.5, tau = 8*300., correlation = 0 , model = 'hopfield'):
# Declare input variables
self.N = N # number of neurons
self.seed = seed # size of pattern mu seed
self.p = p # number of stored patterns
self.k = k # length of the sequence
self.mu = mu # initial pattern to be seeded
self.beta = beta # inverse temperature
self.lamda = lamda # non-equilibrium driving
self.tau = tau # delay time
self.correlation = correlation # correlation between patterns
self.model = model # model to be simulated
# Declare data structures
self.neurons = np.zeros(self.N) # neurons, S_i
self.patterns = np.zeros((self.p, self.N)) # patterns, xi_i^mu
self.field = np.zeros(self.N) # fields, h_i
self.hebbian = np.zeros((self.N,self.N)) # hebbian matrix, J_{ij}^1
# Generate patterns x_i^\mu, J_{ij}^1, and J_{ij}^2
self.patterns = 2*(np.random.randint(0, 2, (self.p, self.N)) - .5)
# Initialize values
if self.p<self.k: sys.exit('p<k: sequence too long')
self.initialize()
self.update()
def initialize(self):
"""
        When correlation is nonzero, the last `correlation` entries of every
        pattern are fixed to the same values, which introduces correlations
        between patterns. We also define
the hebbian weight, and initialize the neurons of the system in a state
with the first seed neurons equal to the corresponding values in the mu-th
pattern. Finally, the delayed weights are set to unity.
"""
# Correlate patterns
if self.correlation!=0:
self.patterns[:,-int(self.correlation):] = +1.
self.patterns[:,-int(self.correlation/2.):] = -1.
# Initialize hebbian weights
self.hebbian = np.tensordot(self.patterns, self.patterns, [[0], [0]]) \
/ self.N
# Set initial state of seed neurons to pattern mu
self.neurons = 2*(np.random.randint(0, 2, self.N) - .5)
self.neurons[:self.seed] = self.patterns[self.mu, :self.seed]
# Set delayed neurons and weights
self.delayed_neurons = np.zeros(self.N) # delayed neurons, \bar{S}_i
self.delayed_weights = np.ones(self.p) # weight of patterns, c_\mu
return 0
def update(self):
"""
This function updates the equilibrium field, the non-equilibrium field for
the kinetic model, and the energy weights for the energetic model. To do
so it reads the model used and calls the corresponding update function.
"""
cases = {'hopfield': self.update_equilibrium,
'kinetic': self.update_kinetic,
'kinetic-loop': self.update_kinetic_loop,
'kinetic-competing': self.update_kinetic_competing,
'energetic': self.update_energetic,
'energetic-loop': self.update_energetic_loop,
'energetic-modified': self.update_energetic_modified,
'energetic-modified-loop': self.update_energetic_modified_loop}
return cases[self.model]()
def update_equilibrium(self):
"""
Calculate the equilibrium fields. This is analogous to calculating the
energy difference in an Ising model, and it has to be done in every time
        step. Unlike in the Ising model, due to the global coupling, this is a
time-consuming operation.
"""
self.field = self.hebbian.dot(self.neurons)
return 0
def update_kinetic_loop(self):
"""
Calculate the fields for the kinetic dynamics in a loop. This involves
defining the shifted overlaps by rolling the delayed one (which we
        calculate). The state of the delayed neurons is also updated.
"""
# Calculate shifted overlaps
shifted_overlaps = np.roll(self.delayed_overlaps(), +1, axis=0)
# Update fields
self.field = self.hebbian.dot(self.neurons) + self.lamda \
* np.tensordot(self.patterns, shifted_overlaps, [[0],[0]] )
# Update delayed neurons
self.delayed_neurons = self.delayed_neurons * (1-1/self.tau) \
+ self.neurons / self.tau
return 0
def update_kinetic(self):
"""
Calculate the fields for the kinetic dynamics. This involves defining the
        shifted overlaps of length k, by rolling the delayed overlaps (which we
        calculate). The state of the delayed neurons is also updated.
"""
# Calculate shifted overlaps
shifted_overlaps = np.roll(self.delayed_overlaps(), +1, axis=0)
shifted_overlaps[self.k:] = 0. # remove transitions after k
shifted_overlaps[0] = 0. # remove N->1 transition
# Update fields
self.field = self.hebbian.dot(self.neurons) + self.lamda \
* np.tensordot(self.patterns, shifted_overlaps, [[0],[0]] )
# Update delayed neurons
self.delayed_neurons = self.delayed_neurons * (1-1/self.tau) \
+ self.neurons / self.tau
return 0
def update_kinetic_competing(self):
"""
Calculate the fields for competing kinetic dynamics. This involves
defining the shifted overlaps, by rolling the delayed overlaps (which we
        calculate). The state of the delayed neurons is also updated.
"""
# Calculate common path
if self.p - self.k < 2: sys.exit('p<k+2: cannot branch')
common = np.roll(self.delayed_overlaps(), +1, axis=0)
common[0], common[self.k:] = 0., 0.
# Calculate branched path
b = int((self.p - self.k) / 2.)
branch1 = np.roll(self.delayed_overlaps(), +1, axis=0)
branch1[:self.k], branch1[self.k+b:] = 0., 0.
branch2 = self.delayed_overlaps()[self.k+b-1:-1]
branch2[0] = self.delayed_overlaps()[self.k-1]
# Update fields
self.field = self.hebbian.dot(self.neurons) \
+ np.tensordot(self.patterns, common, [[0],[0]]) * self.lamda\
+ np.tensordot(self.patterns, branch1, [[0],[0]]) * self.lamda\
+ np.tensordot(self.patterns[self.k+b:], branch2,[[0],[0]])\
* self.lamda
# Update delayed neurons
self.delayed_neurons = self.delayed_neurons * (1-1/self.tau) \
+ self.neurons / self.tau
return 0
def update_energetic_loop(self):
"""
Calculate the fields for the energetic dynamics in a loop. This involves
creating a new hebbian matrix for the current delayed weights (which
slows down the update rule). After updating the fields, the state of
the delayed weights is also updated.
"""
# Calculate delayed hebbian matrix
weighted_patterns = self.patterns.transpose()*self.delayed_weights
weighted_patterns = weighted_patterns.transpose()
self.hebbian = np.tensordot( weighted_patterns, self.patterns,\
[[0], [0]] ) / self.N
# Update fields
self.field = self.hebbian.dot(self.neurons)
# Update delayed weights
self.delayed_weights = self.delayed_weights * (1-1/self.tau)\
+ np.roll( self.overlaps(), +1, axis=0)/ self.tau
return 0
def update_energetic(self):
"""
Calculate the fields for the energetic dynamics. This involves creating
a new hebbian matrix for the current delayed weights (which slows down the
update rule). After updating the fields, the state of the delayed
weights is also updated.
"""
# Calculate delayed hebbian matrix
weighted_patterns = self.patterns.transpose()*self.delayed_weights
weighted_patterns = weighted_patterns.transpose()
self.hebbian = np.tensordot( weighted_patterns, self.patterns,\
[[0], [0]] ) / self.N
# Update fields
self.field = self.hebbian.dot(self.neurons)
# Update delayed weights
shifted_overlaps = np.roll(self.overlaps(), +1, axis=0)
shifted_overlaps[self.k:] = 0. # remove transitions after k
shifted_overlaps[0] = 0. # remove N->1 transition
self.delayed_weights = self.delayed_weights * (1-1/self.tau) \
+ shifted_overlaps / self.tau
return 0
def update_energetic_modified_loop(self):
"""
        Calculate the fields for the modified energetic dynamics in a loop.
        This involves creating a new hebbian matrix for the current delayed
        weights (which slows down the update rule). After updating the fields,
        the state of the delayed weights is also updated.
"""
# Calculate delayed hebbian matrix
weighted_patterns = self.patterns.transpose()*self.delayed_weights
weighted_patterns = weighted_patterns.transpose()
self.hebbian = np.tensordot( weighted_patterns, self.patterns,\
[[0], [0]] ) / self.N
# Update fields
self.field = self.hebbian.dot(self.neurons)
# Update delayed weights
self.delayed_weights = self.delayed_weights * (1-1/self.tau)\
+ np.roll( np.abs( self.overlaps() ), +1, axis=0)\
/ self.tau
return 0
def update_energetic_modified(self):
"""
Calculate the fields for the modified energetic dynamics. This involves
creating a new hebbian matrix for the current delayed weights (which
slows down the update rule). After updating the fields, the state of the
delayed weights is also updated.
"""
# Calculate delayed hebbian matrix
weighted_patterns = self.patterns.transpose()*self.delayed_weights
weighted_patterns = weighted_patterns.transpose()
self.hebbian = np.tensordot( weighted_patterns, self.patterns,\
[[0], [0]] ) / self.N
# Update fields
self.field = self.hebbian.dot(self.neurons)
# Update delayed weights
shifted_overlaps = np.roll(self.overlaps(), +1, axis=0)
shifted_overlaps[self.k:] = 0. # remove transitions after k
shifted_overlaps[0] = 0. # remove N->1 transition
self.delayed_weights = self.delayed_weights * (1-1/self.tau)\
+ np.abs( shifted_overlaps ) / self.tau
return 0
def overlaps(self):
"""
Calculate the overlaps
"""
m = np.tensordot(self.patterns, self.neurons, [[1],[0]]) / self.N
return m
def delayed_overlaps(self):
"""
Calculate the delayed overlaps
"""
m = np.tensordot(self.patterns, self.delayed_neurons, [[1],[0]]) / self.N
return m
def pattern_correlations(self):
"""
Calculate the correlations between the stored patterns
"""
corr = np.tensordot(self.patterns, self.patterns, [[1],[1]]) / self.N
return corr
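# A minimal sketch of the overlap computation implemented by the methods
# above (an added illustration, not part of the original module). Assuming
# `patterns` is a (p, N) array of +/-1 entries and `neurons` a length-N
# state vector, the overlap with pattern mu is
#     m[mu] = (1/N) * sum_i patterns[mu, i] * neurons[i]
# For example:
#     patterns = np.sign(np.random.randn(10, 200))
#     neurons = patterns[0].copy()
#     m = patterns.dot(neurons) / 200.  # m[0] == 1.0, others ~ O(N**-0.5)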
class RandomNumbers:
"""
This class generates the random numbers that will be used for the stochastic
dynamics. That is the sequence of neurons to be updated, and the value of
the "coin toss" used to determine whether the neuron is flipped.
"""
def __init__(self, N = 200, swipes = 1000):
self.sequence = np.random.randint(0, N, (swipes, N))
self.toss = np.random.rand(swipes, N)
class Simulation:
"""
    This class prepares a simulation and contains the functions necessary to
    run it. It takes all input parameters, generates the randomness in the
    system, and sets up the structures, seeds, and lattice on which the
    simulation runs. The simulation can then be run with the desired method.
"""
def __init__(self, N = 300, seed = 50, p = 10, k = 3, mu = 0, beta = 1.,
lamda = 1.5, tau = 8*300., correlation = 0 , model = 'kinetic',
swipes = 100, ds = 1, path = './tmp/DATA'):
# Declare input variables
self.N = N # number of neurons
self.seed = seed # size of pattern mu seed
self.p = p # number of stored patterns
self.k = k # length of stored sequence
self.mu = mu # initial pattern to be seeded
self.beta = beta # inverse temperature
self.lamda = lamda # non-equilibrium driving
self.tau = tau # delay time
self.correlation = correlation # correlation between patterns
self.model = model # model to be simulated
self.swipes = swipes # lattice swipes, T = swipes * N^2
self.ds = ds # spacing between saves
self.path = path # path for data storage
# Generate network
net = Network( self.N, self.seed, self.p, self.k, self.mu, self.beta,
self.lamda, self.tau, self.correlation, self.model)
self.net = net # store class in simulation, for easy debugging
def run(self):
"""
        Function that runs the simulation. It first creates the required path
        to store files, then generates the random numbers and initializes the
        network, and finally loops over "swipes" Monte Carlo sweeps.
"""
# Create path
        if not os.path.isdir(self.path): os.makedirs(self.path)
        # Generate random numbers and initialize network
random = RandomNumbers(self.N, self.swipes)
self.net.initialize()
# Loop over monte-carlo swipes
for s in np.arange(0,self.swipes):
# Store data
            if s%self.ds==0: self.store(s, self.net)
# Update lattice with mc swipe
self.mc_update(s, self.net, random)
return 0
def store(self, s, net):
"""
Store data in path. For the initial state the whole network class is
stored as a pickle file. After that, only the lattice state is stored as
binary array.
"""
d = int(np.log10(self.swipes)) # number of digits used for filename
# Save net class in the initial state
if s==0:
with open(self.path + '/net.pkl', 'wb') as f: pickle.dump(net, f)
# Save lattice state
with open(self.path + '/S_' + ("%0"+str(d+1)+"d")%s + '.npy', 'wb') as f:
np.save(f, net.neurons)
return 0
def mc_update(self, s, net, random):
"""
        Update rule using Glauber dynamics. N random neurons are sequentially
        chosen. The field experienced by each chosen neuron is calculated;
        this determines the transition rate W, and ultimately biases the
        transition.
"""
# Do N monte-carlo steps (one swipe) over the random sequence
for k in np.arange(self.N):
# Choose neuron i and calculate state change
i = random.sequence[s, k]
delta_sigma = -2*net.neurons[i]
# Calculate energy gap dU and transition rate W
dU = self.beta * delta_sigma * net.field[i]
W = np.exp(dU/2) / (np.exp(dU/2) + np.exp(-dU/2))
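            # (Added note) W = exp(dU/2)/(exp(dU/2)+exp(-dU/2)) equals the
            # logistic sigmoid 1/(1+exp(-dU)), i.e. the standard Glauber
            # flip probability.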
# Update neuron and fields
if random.toss[s, k] < W: net.neurons[i] = - net.neurons[i]
net.update()
return 0
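# A minimal usage sketch (added for illustration; the parameter values are
# the constructor defaults above, and np/os/pickle are assumed imported at
# the top of this file):
#
#     sim = Simulation(N=300, p=10, k=3, model='kinetic', swipes=100,
#                      path='./tmp/DATA')
#     sim.run()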
|
pablosv/memexp
|
memories/memexp.py
|
Python
|
gpl-3.0
| 14,995
|
[
"NEURON"
] |
6b408b7f7c7077418046961083b0895df37a0f9959b382e26539b4c6b516a36e
|
#########################################################################
# Ryuretic: A Modular Framework for RYU #
# !/ryu/ryu/app/Ryuretic/Ryuretic_Intf.py #
# Authors: #
# Jacob Cox (jcox70@gatech.edu) #
# Sean Donovan (sdonovan@gatech.edu) #
# Ryuretic_Intf.py #
# date 28 April 2016 #
#########################################################################
# Copyright (C) 2016 Jacob Cox - All Rights Reserved #
# You may use, distribute and modify this code under the #
# terms of the Ryuretic license, provided this work is cited #
# in the work for which it is used. #
# For latest updates, please visit: #
# https://github.com/Ryuretic/RAP #
#########################################################################
"""How To Run This Program
1) Ensure you have Ryu installed.
2) Save the following files to /home/ubuntu/ryu/ryu/app/Ryuretic directory
a) Ryuretic_Intf.py
b) Ryuretic.py
c) Pkt_Parse13.py
d) switch_mod13.py
3) In your controller terminal type: cd ryu
4) Enter PYTHONPATH=. ./bin/ryu-manager ryu/app/Ryuretic/Ryuretic_Intf_v1.py
"""
#########################################################################
from Ryuretic import coupler
#################1 Import Needed Libraries 1###################
#[1] Import needed libraries here #
#########################################################################
import string, random
class Ryuretic_coupler(coupler):
def __init__(self, *args, **kwargs):
super(Ryuretic_coupler, self).__init__(*args, **kwargs)
############## 2 Add User Variables 2 ###################
#[2] Add new global variables here. #
# Ex. ICMP_ECHO_REQUEST = 8, self.netView = {} #
#################################################################
self.cntrl = {'mac':'ca:ca:ca:ad:ad:ad','ip':'192.168.0.40','port':None}
self.validNAT = {'mac':'aa:aa:aa:aa:aa:aa','ip':'192.168.0.224'}
self.t_agentIP = '192.168.0.1'
self.t_agent = {} #Records TA parameter from respond_to_ping
self.dns_tbl = {} #Use to redirect DNS
self.tcp_tbl = {} #Use to redirect TCP
self.port_mac_map = {} #Used by multi-mac detector
self.port_AV = {} #Tracks per port Time-2-ack average
self.tta = {} #Tracks TCP handshake per (src,srcip,srcport,dstip)
self.tcpConnCount = 0 #Future var for tracking total TCP connections
self.policyTbl = {} #Tracks policies applied to port/mac
self.netView = {} #Maps switch connections by port,mac,ip
self.portTbl, self.macTbl, self.ipTbl = {},{},{}
self.testIP = '0.0.0.0' #'192.168.0.22'
#self.portTbl[9]='test'
#self.macTbl['aa:aa:aa:aa:00:22'] = 'test'
#self.ipTbl['192.168.0.22'] = 'test'
#Assigns flag to MAC/Port
self.keyID = 101
ICMP_ECHO_REPLY = 0
ICMP_ECHO_REQUEST = 8
################ 3 Proactive Rule Sets 3 ###################
#[3] Insert proactive rules defined below. Follow format below #
# Options include drop or redirect, fwd is the default. #
#####################################################################
def get_proactive_rules(self, dp, parser, ofproto):
return None, None
#fields, ops = self.honeypot(dp, parser, ofproto)
#return fields, ops
################# 4 Reactive Rule Sets 4 #####################
#[4] use below handles to direct packets to reactive user modules #
# defined in location #[5]. If no rule is added, then #
    # the default self.default_Field_Ops(pkt) must be used              #
#####################################################################
# Determine highest priority fields and ops pair, if needed #
# xfields = [fields0, fields1, fields2] #
# xops = [ops0, ops1, ops2] #
# fields,ops = self._build_FldOps(xfields,xops) #
#####################################################################
def handle_eth(self,pkt):
print "Handle Ether: ", pkt['srcmac'],'->',pkt['dstmac']
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
#def handle_arp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle ARP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle ARP: ",pkt['srcip'],"->",pkt['dstip']
#fields, ops = self.respond_to_arp(pkt)
        ##Determine if mac or port has a status
##pkt_status = self.check_net_tbl(pkt['srcmac'],pkt['inport'])
##print pkt_status
#self.install_field_ops(pkt,fields,ops)
def handle_arp(self,pkt):
print "-------------------------------------------------------------"
print "Handle ARP: ",pkt['srcmac'],"->",pkt['dstmac']
print "Handle ARP: ",pkt['srcip'],"->",pkt['dstip']
fields, ops = self.respond_to_arp(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_ip(self,pkt):
print "-------------------------------------------------------------"
print "Handle IP"
#fields, ops = self.TTL_Check(pkt) #Lab 9
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_icmp(self,pkt):
print "-------------------------------------------------------------"
print "Handle ICMP: ",pkt['srcmac'],"->",pkt['dstmac']
print "Handle ICMP: ",pkt['srcip'],"->",pkt['dstip']
fields,ops = self.respond_to_ping(pkt)
self.install_field_ops(pkt, fields, ops)
def handle_tcp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle TCP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle TCP: ",pkt['srcip'],"->",pkt['dstip']
#print "Handle TCP: ",pkt['srcport'],"->",pkt['dstport']
pkt_status = self.check_ip_tbl(pkt)
if pkt_status == 'test': #test src and dest
fields,ops = self.redirect_TCP(pkt)
elif pkt_status == 'deny':
fields,ops = self.redirect_TCP(pkt)
else:
#fields,ops = self.default_Field_Ops(pkt)
#fields,ops = self.test_TCP(pkt)
fields,ops = self.TTA_analysis(pkt)
self.install_field_ops(pkt, fields, ops)
def test_TCP(self,pkt):
fields,ops = self.default_Field_Ops(pkt)
if pkt['srcip'] == self.testIP:
print "IP detected: ", pkt['srcip']
self.flagHost(pkt,'test')
fields,ops=self.redirect_TCP(pkt)
return fields,ops
return fields,ops
def redirect_TCP(self,pkt):
print "Redirect_TCP: "
print "pkt info: ", pkt['srcmac'],' ',pkt['dstmac'],' ',pkt['srcip'],' ',pkt['dstip']
print pkt['srcport'],' ',pkt['dstport']
#Uses ipTbl, tcp_tbl, and t_agent
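        # (Added summary) Outbound packets from a flagged source IP are
        # rewritten toward the trusted agent, with the original destination
        # saved in tcp_tbl; reply traffic from the agent is rewritten back
        # using that saved entry.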
fields,ops = self.default_Field_Ops(pkt)
if self.ipTbl.has_key(pkt['srcip']):
if self.ipTbl[pkt['srcip']] in ['test','deny']:
print "ipTbl Contents", self.ipTbl
key = (pkt['srcip'],pkt['srcport'])
print "Key is : ", key
self.tcp_tbl[key] = {'dstip':pkt['dstip'],'dstmac':pkt['dstmac'],
'dstport':pkt['dstport']}
fields.update({'srcmac':pkt['srcmac'],'srcip':pkt['srcip']})
fields.update({'dstmac':self.t_agent['mac'],'dstip':self.t_agent['ip']})
#if pkt['dstport'] == 443:
#fields['dstport'] = 80
ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
'op':'mod', 'newport':self.t_agent['port']}
print "TCP Table: ", self.tcp_tbl[key]
elif self.ipTbl.has_key(pkt['dstip']):
print "Returning to ", pkt['dstip']
if self.ipTbl[pkt['dstip']] in ['test','deny']:
print "ipTbl Contents", self.ipTbl
key = (pkt['dstip'],pkt['dstport'])
print "Key and table: ", key, ' ', self.tcp_tbl[key]
fields.update({'srcmac':self.tcp_tbl[key]['dstmac'],
'srcip':self.tcp_tbl[key]['dstip']})
#if self.tcp_tbl[key]['dstport'] == 443:
#fields.update({'srcport':443})
fields.update({'dstmac':pkt['dstmac'], 'dstip':pkt['dstip']})
ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
'op':'mod', 'newport':None}
#self.tcp_tbl.pop(key)
#print "TCP Table: ", self.tcp_tbl
return fields, ops
# Add flag to policyTbl, macTbl, portTbl
def flagHost(self,pkt,flag):
print 'Flag Host: ', pkt['srcmac'],'->',flag
self.macTbl[pkt['srcmac']]={'stat':flag,'port':pkt['inport'],
'ip':pkt['srcip']}
self.portTbl[pkt['inport']]=flag
self.ipTbl[pkt['srcip']] = flag
if flag != 'norm':
keyID = self.keyID
self.keyID += 1
#create passkey
passkey =''.join(random.choice(string.ascii_letters) for x in range(8))
#update policy table
self.policyTbl[keyID]={'inport':pkt['inport'],'srcmac':pkt['srcmac'],
'ip':pkt['srcip'],'passkey':passkey,'stat':flag}
#Notify trusted agent of newly flagged client
            self.update_TA(pkt, keyID, 'l') # load message
def handle_udp(self,pkt):
print "-------------------------------------------------------------"
print "Handle UDP: ",pkt['srcmac'],"->",pkt['dstmac']
print "Handle UDP: ",pkt['srcip'],'->',pkt['dstip']
#Added to build MAC and port associations
pkt_status = self.check_ip_tbl(pkt)
if pkt_status == 'test': #test src and dest
fields,ops = self.redirect_DNS(pkt)
elif pkt_status == 'deny':
fields,ops = self.redirect_DNS(pkt)
else:
fields,ops = self.test_DNS(pkt)
self.install_field_ops(pkt, fields, ops)
def test_DNS(self,pkt):
print "Testing DNS"
fields,ops = self.default_Field_Ops(pkt)
if pkt['srcip'] == self.testIP:
print "IP detected: ", pkt['srcip']
self.flagHost(pkt,'test')
fields,ops=self.redirect_DNS(pkt)
return fields,ops
return fields,ops
def redirect_DNS(self,pkt):
print "Redirect_DNS: "
#Uses macTbl, dns_tbl, and t_agent
fields,ops = self.default_Field_Ops(pkt)
if self.ipTbl.has_key(pkt['srcip']):
if self.ipTbl[pkt['srcip']]== 'test':
key = (pkt['srcip'],pkt['srcport'])
print key
self.dns_tbl[key] = {'dstip':pkt['dstip'],'dstmac':pkt['dstmac']}
fields.update({'dstmac':self.t_agent['mac'],
'dstip':self.t_agent['ip']})
fields.update({'srcmac':pkt['srcmac'],'srcip':pkt['srcip']})
ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
'op':'mod', 'newport':self.t_agent['port']}
elif self.ipTbl.has_key(pkt['dstip']):
if self.ipTbl[pkt['dstip']]== 'test':
key = (pkt['dstip'],pkt['dstport'])
print key
fields.update({'srcmac':self.dns_tbl[key]['dstmac'],
'srcip':self.dns_tbl[key]['dstip']})
fields.update({'dstmac':pkt['dstmac'], 'dstip':pkt['dstip']})
ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
'op':'mod', 'newport':None}
#self.dns_tbl.pop(key)
#print "DNS Table: ", self.dns_tbl
return fields, ops
#Check status of port and mac.
def check_ip_tbl(self,pkt):
#print "Check_ip_tbl:"
srcip,dstip = pkt['srcip'],pkt['dstip']
if self.ipTbl.has_key(srcip):
#print "Found: ", srcip,'->', self.ipTbl[srcip]
return self.ipTbl[srcip]
elif self.ipTbl.has_key(dstip):
#print "Found: ", dstip,'->', self.ipTbl[dstip]
return self.ipTbl[dstip]
else:
#print "Not Found: ", srcip, ', ', dstip
return 'No_Flag'
# All packets not defined above are handled here.
def handle_unk(self,pkt):
print "-------------------------------------------------------------"
print "Handle Uknown"
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt, fields, ops)
######################################################################
# The following are from the old NFG file.
def default_Field_Ops(self,pkt):
def _loadFields(pkt):
#keys specifies match fields for action. Default is
#inport and srcmac. ptype used for craft icmp, udp, etc.
fields = {'keys':['inport','srcmac'],'ptype':[], 'dp':pkt['dp'],
'ofproto':pkt['ofproto'], 'msg':pkt['msg'],
'inport':pkt['inport'], 'srcmac':pkt['srcmac'],
'ethtype':pkt['ethtype'], 'dstmac':None, 'srcip':None,
'proto':None, 'dstip':None, 'srcport':None, 'dstport':None,
'com':None, 'id':0}
return fields
def _loadOps():
#print "Loading ops"
#Specifies the timeouts, priority, operation and outport
#options for op: 'fwd','drop', 'mir', 'redir', 'craft'
ops = {'hard_t':None, 'idle_t':None, 'priority':10, \
'op':'fwd', 'newport':None}
return ops
#print "default Field_Ops called"
fields = _loadFields(pkt)
ops = _loadOps()
return fields, ops
######################################################################
############ 5 Ryuretic Network Application Modules 5 ##############
#[5] Add user created methods below. Examples are provided to assist #
# the user with basic python, dictionary, list, and function calls #
######################################################################
# Confirm mac has been seen before and no issues are recorded
def TTL_Check(self, pkt):
#initialize fields and ops with default settings
fields, ops = self.default_Field_Ops(pkt)
if pkt['srcmac'] != self.validNAT['mac']:
if pkt['ttl']==63 or pkt['ttl']==127:
print 'TTL Decrement Detected on ',pkt['srcmac'],' TTL is :',pkt['ttl']
fields, ops = self.add_drop_params(pkt,fields,ops)
else:
ops['idle_t'] = 5
print "Packet TTL: ", pkt['ttl'], ' ', pkt['srcip'],' ', \
pkt['inport'],' ', pkt['srcmac']
else:
ops['idle_t'] = 20
            ops['priority'] = 10
return fields, ops
def Multi_MAC_Checker(self, pkt):
fields, ops = self.default_Field_Ops(pkt)
print "*** Checking MAC ***"
#self.port_mac_map = {}
if self.port_mac_map.has_key(pkt['inport']):
if pkt['srcmac'] != self.port_mac_map[pkt['inport']]:
print " Multi-mac port detected "
fields, ops = self.add_drop_params(pkt,fields,ops)
else:
fields, ops = self.fwd_persist(pkt,fields,ops)
else:
self.port_mac_map[pkt['inport']] = pkt['srcmac']
return fields, ops
#change name to monitor_TCP for RAP
def TTA_analysis(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
bits = pkt['bits']
dst, dstip, dstport = pkt['dstmac'], pkt['dstip'], pkt['dstport']
src, srcip, srcport = pkt['srcmac'], pkt['srcip'], pkt['srcport']
inport = pkt['inport']
send = (src,srcip,srcport,dstip)
arrive = (dst,dstip,dstport,srcip)
t_in = pkt['t_in']
#print"*****\n"+self.tta+"/n******/n"+self.port_AV+"/n*****"
if bits == 20:
if self.tta.has_key(send):
self.tta[send]['stage'] = 0
elif self.tta.has_key(arrive):
#print pkt
self.tta[arrive]['stage'] = 0
return fields, ops
if bits == 2:
if self.tta.has_key(send):
self.tta[send].update({'inport':inport,'stage':1})
else:
self.tta.update({send:{'inport':inport,'stage':1}})
return fields, ops
if bits == 18:
if self.tta.has_key(arrive):
if self.tta[arrive]['stage']==1:
self.tta[arrive].update({'syn':t_in,'stage':2})
return fields,ops
if bits == 16:
if self.tta.has_key(send):
if self.tta[send]['stage']==2:
tta = t_in - self.tta[send]['syn']
self.tta[send].update({'stage':3, 'ack':t_in, 'tta':tta})
#print '** Calc TTA :', tta
if self.port_AV.has_key(self.tta[send]['inport']):
portAV = ((self.port_AV[self.tta[send]['inport']] * \
9) + tta)/10
self.port_AV[self.tta[send]['inport']] = portAV
else:
portAV = ((0.001*9)+tta)/10
self.port_AV.update({self.tta[send]['inport']:portAV})
#print "****"
#print "Port and TTA: ", inport, self.tta[send]['tta']
print '****\nPort Averages: ', self.port_AV, '\n****'
#print "****"
del self.tta[send]
return fields, ops
#print "Persist"
fields, ops = self.tcp_persist(pkt,fields,ops)
return fields, ops
if bits == 24:
#print "HTTP Push"
return fields, ops
if bits == 17:
print 'Port Averages: ', self.port_AV
if self.tta.has_key(send):
del self.tta[send]
elif self.tta.has_key(arrive):
del self.tta[arrive]
return fields, ops
print "Packet not addressed", bits, inport, src, dstip
return fields, ops
# Call to temporarily install drop parameter for a packet to switch
def add_drop_params(self, pkt, fields, ops):
#may need to include priority
fields['keys'] = ['inport']
fields['inport'] = pkt['inport']
ops['priority'] = 100
ops['idle_t'] = 60
ops['op']='drop'
return fields, ops
# Call to temporarily install TCP flow connection on switch
def tcp_persist(self, pkt,fields,ops):
#print "TCP_Persist: ", pkt['srcmac'],'->', pkt['dstmac']
#print "TCP_Persist: ", pkt['srcip'],'->',pkt['dstip']
fields['keys'] = ['inport', 'srcmac', 'srcip', 'ethtype', 'srcport']
fields['srcport'] = pkt['srcport']
fields['srcip'] = pkt['srcip']
ops['idle_t'] = 5
ops['priority'] = 10
return fields, ops
def fwd_persist(self, pkt,fields,ops):
ops['idle_t'] = 3
ops['priority'] = 10
return fields, ops
def arp_persist(self, pkt):
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport','srcmac','ethtype']
ops['idle_t'] = 10
ops['priority'] = 2
return fields, ops
################################################################
"""
    The following code is implemented to allow the trusted agent to
    communicate with the controller and vice versa.
"""
################################################################
#Receive and respond to arp
def respond_to_arp(self,pkt):
print 'Respond to Arp:', pkt['srcmac'],'->',pkt['dstmac']
print 'Respond to Arp:', pkt['srcip'],'->',pkt['dstip']
fields, ops = self.default_Field_Ops(pkt)
#Added to build MAC and port associations
if not self.macTbl.has_key(pkt['srcmac']):
self.macTbl[pkt['srcmac']] = {'port':pkt['inport'], 'stat':'unk'}
if pkt['dstip'] == self.cntrl['ip']:
print "Message to Controller"
fields['keys']=['srcmac', 'srcip', 'ethtype', 'inport']
fields['ptype'] = 'arp'
fields['dstip'] = pkt['srcip']
fields['srcip'] = self.cntrl['ip']
fields['dstmac'] = pkt['srcmac']
fields['srcmac'] = self.cntrl['mac']
fields['ethtype'] = 0x0806
ops['op'] = 'craft'
ops['newport'] = pkt['inport']
#print "INPORT: ", pkt['inport']
return fields, ops
#Respond to ping. Forward or respond if to cntrl from trusted agent.
def respond_to_ping(self,pkt):
def get_fields(keyID):
srcmac = self.policyTbl[keyID]['srcmac']
inport = self.policyTbl[keyID]['inport']
srcip = self.policyTbl[keyID]['ip']
print inport, ', ', srcmac, ', ', srcip
return srcmac, inport, srcip
def remove_keyID(keyID):
print "Policy Table Contents: ", self.policyTbl
if self.policyTbl.has_key(keyID):
srcmac, inport, srcip = get_fields(keyID)
if self.macTbl.has_key(srcmac):
print "Removing MAC", srcmac
self.macTbl.pop(srcmac)
if self.portTbl.has_key(inport):
print "Removing Port", inport
self.portTbl.pop(inport)
if self.ipTbl.has_key(srcip):
print "Removing IP", srcip
self.ipTbl.pop(srcip)
self.policyTbl.pop(keyID)
print "Respond to Ping: ", pkt['srcmac'],'->',pkt['dstmac']
fields, ops = self.default_Field_Ops(pkt)
if pkt['dstip'] == self.cntrl['ip'] and pkt['srcip'] == self.t_agentIP:
#print'respond to ping'
rcvData = pkt['data'].data
#Actions {a-acknowledge, i-init, d-delete, r-result, v-verify}
#action, keyID = rcvData.split(',')
#keyID = keyID.rstrip(' \t\r\n\0')
print rcvData
try:
action, keyID, result = rcvData.split(',')
result = result.rstrip(' \t\r\n\0')
print "Received Result"
except:
action, keyID = rcvData.split(',')
print "Received Revocation."
keyID = keyID.rstrip(' \t\r\n\0')
print "Key ID Length: ", len(keyID)
keyID = int(keyID)
print "KeyID is ", keyID, ', ', type(keyID)
print "Action is ", action, "\n\n\n*********"
######################################################
if action == 'i':
self.t_agent = {'ip':pkt['srcip'],'mac':pkt['srcmac'],
'port':pkt['inport'],'msg':pkt['msg'],
'ofproto':pkt['ofproto'], 'dp':pkt['dp']}
print "T_AGENT Loaded"
elif action == 'd':
#Deleting flagged host policy
print "Removing (",keyID,") from Policy Table"
print "Existing Keys: ", self.policyTbl.keys()
remove_keyID(keyID)
elif action == 'r':
print "Validating result"
print "Key present?", self.policyTbl.has_key(keyID)
if self.policyTbl.has_key(keyID):
print "Test Result is: ", result
if result == 'P':
print "Removing keyID"
remove_keyID(keyID)
                    elif result == 'F':
print "Flagging Host: ", self.policyTbl[keyID]['ip']
self.policyTbl[keyID]['stat'] = 'deny'
srcmac, inport, srcip = get_fields(keyID)
self.macTbl[srcmac].update({'stat':'deny'})
self.portTbl[inport],self.ipTbl[srcip] ='deny','deny'
self.update_TA(pkt, keyID,'e') #send edit message
#Notify TA of update_TA(self,pkt, keyID)
else:
print "An Error Occured"
            elif action == 'u':
                #This is more complicated: it requires data that is not being
                #stored; may need to add fields to policyTbl. Maybe not.
pass
            elif action == 'a':
#Acknowledge receipt
pass
else:
print "No match"
fields.update({'srcmac':self.cntrl['mac'], 'dstmac':pkt['srcmac']})
fields.update({'srcip':self.cntrl['ip'], 'dstip':pkt['srcip']})
fields.update({'ptype':'icmp','ethtype':0x0800, 'proto':1})
fields['com'] = 'a,'+rcvData
ops.update({'op':'craft', 'newport':pkt['inport']})
return fields, ops
#Crafts tailored ICMP message for trusted agent
def update_TA(self,pkt, keyID, message):
table = self.policyTbl[keyID]
print 'Update Table: ', pkt['srcmac'],'->',keyID,'->',table['stat']
print 'Update Table: ', table['srcmac'],'->',keyID,'->',table['stat']
#print "Updating Trusted Agent"
fields, ops = {},{}
fields['keys'] = ['inport', 'srcip']
fields.update({'dstip':self.t_agent['ip'], 'srcip':self.cntrl['ip']})
fields.update({'dstmac':self.t_agent['mac'], 'srcmac':self.cntrl['mac']})
fields.update({'dp':self.t_agent['dp'], 'msg':self.t_agent['msg']})
fields.update({'inport':self.t_agent['port'],'ofproto':\
self.t_agent['ofproto']})
fields.update({'ptype':'icmp', 'ethtype':0x0800, 'proto':1, 'id':0})
fields['com'] = message+','+table['srcmac']+','+str(table['inport'])+\
','+str(table['passkey'])+','+table['stat']+\
','+str(keyID)
ops = {'hard_t':None, 'idle_t':None, 'priority':0, \
'op':'craft', 'newport':self.t_agent['port']}
self.install_field_ops(pkt, fields, ops)
################################################################
"""
The following code controls the redirection of packets from their intended
destination to our trusted agent. This occurs when a port is flagged.
"""
################################################################
#Create a method to inject a redirect anytime the sta4 IP address is
#Check status of port and mac.
def check_net_tbl(self,pkt):
mac, ip, port = pkt['srcmac'], pkt['srcip'], pkt['inport']
print "(536) Check NetTbl: ", mac, ' & ', port,'->',self.macTbl.keys()
if mac in self.macTbl.keys():
#print "Found: ", mac,'->', self.macTbl[mac]['stat']
return self.macTbl[mac]['stat']
elif port in self.portTbl.keys():
#print "Port ", port, " found in table."
return self.portTbl[port]
elif ip in self.ipTbl.keys():
#print "IP ", ip, " found in table."
return self.ipTbl[ip]
else:
#print "Not Found: ", mac
return 'new'
#Redirect ICMP packets to trusted agent
def Icmp_Redirect(self,pkt):
print "Redirecting ICMP", pkt['srcmac'],'->',pkt['dstmac'],'||',self.t_agent['mac']
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport', 'ethtype']
fields['dstmac'] = self.t_agent['mac']
fields['dstip'] = self.t_agent['ip']
fields['ethtype'] = pkt['ethtype']
ops['op'] = 'redir'
ops['newport'] = self.t_agent['port']
ops['priority'] = 100
ops['idle_t'] = 180
#ops['hard_t'] = 180
return fields, ops
|
Ryuretic/RAP
|
ryu/ryu/app/Ryuretic/Ryuretic_Intf_v6.py
|
Python
|
apache-2.0
| 24,311
|
[
"VisIt"
] |
fecfe8ce0c73947411afeee6bb503842134871626e8d9a26d336b0b3bc8fcae7
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2020-2021, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""Dependency structs."""
# FIXME: add caching all over the place
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import namedtuple
from collections.abc import MutableSequence
from glob import iglob
from urllib.parse import urlparse
from yaml import safe_load
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
from typing import Type, TypeVar
from ansible.galaxy.collection.concrete_artifact_manager import (
ConcreteArtifactsManager,
)
Collection = TypeVar(
'Collection',
'Candidate', 'Requirement',
'_ComputedReqKindsMixin',
)
from ansible.errors import AnsibleError
from ansible.galaxy.api import GalaxyAPI
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.common.arg_spec import ArgumentSpecValidator
from ansible.utils.collection_loader import AnsibleCollectionRef
from ansible.utils.display import Display
_ALLOW_CONCRETE_POINTER_IN_SOURCE = False # NOTE: This is a feature flag
_GALAXY_YAML = b'galaxy.yml'
_MANIFEST_JSON = b'MANIFEST.json'
_SOURCE_METADATA_FILE = b'GALAXY.yml'
display = Display()
def get_validated_source_info(b_source_info_path, namespace, name, version):
source_info_path = to_text(b_source_info_path, errors='surrogate_or_strict')
if not os.path.isfile(b_source_info_path):
return None
try:
with open(b_source_info_path, mode='rb') as fd:
metadata = safe_load(fd)
except OSError as e:
display.warning(
f"Error getting collection source information at '{source_info_path}': {to_text(e, errors='surrogate_or_strict')}"
)
return None
if not isinstance(metadata, MutableMapping):
display.warning(f"Error getting collection source information at '{source_info_path}': expected a YAML dictionary")
return None
schema_errors = _validate_v1_source_info_schema(namespace, name, version, metadata)
if schema_errors:
display.warning(f"Ignoring source metadata file at {source_info_path} due to the following errors:")
display.warning("\n".join(schema_errors))
display.warning("Correct the source metadata file by reinstalling the collection.")
return None
return metadata
def _validate_v1_source_info_schema(namespace, name, version, provided_arguments):
argument_spec_data = dict(
format_version=dict(choices=["1.0.0"]),
download_url=dict(),
version_url=dict(),
server=dict(),
signatures=dict(
type=list,
suboptions=dict(
signature=dict(),
pubkey_fingerprint=dict(),
signing_service=dict(),
pulp_created=dict(),
)
),
name=dict(choices=[name]),
namespace=dict(choices=[namespace]),
version=dict(choices=[version]),
)
if not isinstance(provided_arguments, dict):
raise AnsibleError(
f'Invalid offline source info for {namespace}.{name}:{version}, expected a dict and got {type(provided_arguments)}'
)
validator = ArgumentSpecValidator(argument_spec_data)
validation_result = validator.validate(provided_arguments)
return validation_result.error_messages
def _is_collection_src_dir(dir_path):
b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
return os.path.isfile(os.path.join(b_dir_path, _GALAXY_YAML))
def _is_installed_collection_dir(dir_path):
b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
return os.path.isfile(os.path.join(b_dir_path, _MANIFEST_JSON))
def _is_collection_dir(dir_path):
return (
_is_installed_collection_dir(dir_path) or
_is_collection_src_dir(dir_path)
)
def _find_collections_in_subdirs(dir_path):
b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
subdir_glob_pattern = os.path.join(
b_dir_path,
# b'*', # namespace is supposed to be top-level per spec
b'*', # collection name
)
for subdir in iglob(subdir_glob_pattern):
if os.path.isfile(os.path.join(subdir, _MANIFEST_JSON)):
yield subdir
elif os.path.isfile(os.path.join(subdir, _GALAXY_YAML)):
yield subdir
def _is_collection_namespace_dir(tested_str):
return any(_find_collections_in_subdirs(tested_str))
def _is_file_path(tested_str):
return os.path.isfile(to_bytes(tested_str, errors='surrogate_or_strict'))
def _is_http_url(tested_str):
return urlparse(tested_str).scheme.lower() in {'http', 'https'}
def _is_git_url(tested_str):
return tested_str.startswith(('git+', 'git@'))
def _is_concrete_artifact_pointer(tested_str):
return any(
predicate(tested_str)
for predicate in (
# NOTE: Maintain the checks to be sorted from light to heavy:
_is_git_url,
_is_http_url,
_is_file_path,
_is_collection_dir,
_is_collection_namespace_dir,
)
)
class _ComputedReqKindsMixin:
def __init__(self, *args, **kwargs):
if not self.may_have_offline_galaxy_info:
self._source_info = None
else:
# Store Galaxy metadata adjacent to the namespace of the collection
# Chop off the last two parts of the path (/ns/coll) to get the dir containing the ns
b_src = to_bytes(self.src, errors='surrogate_or_strict')
b_path_parts = b_src.split(to_bytes(os.path.sep))[0:-2]
b_path = to_bytes(os.path.sep).join(b_path_parts)
info_path = self.construct_galaxy_info_path(b_path)
self._source_info = get_validated_source_info(
info_path,
self.namespace,
self.name,
self.ver
)
@classmethod
def from_dir_path_as_unknown( # type: ignore[misc]
cls, # type: Type[Collection]
dir_path, # type: bytes
art_mgr, # type: ConcreteArtifactsManager
): # type: (...) -> Collection
"""Make collection from an unspecified dir type.
This alternative constructor attempts to grab metadata from the
given path if it's a directory. If there's no metadata, it
falls back to guessing the FQCN based on the directory path and
sets the version to "*".
        It raises a ValueError immediately if the input is not an
        existing directory path.
"""
if not os.path.isdir(dir_path):
raise ValueError(
"The collection directory '{path!s}' doesn't exist".
format(path=to_native(dir_path)),
)
try:
return cls.from_dir_path(dir_path, art_mgr)
except ValueError:
return cls.from_dir_path_implicit(dir_path)
@classmethod
def from_dir_path(cls, dir_path, art_mgr):
"""Make collection from an directory with metadata."""
b_dir_path = to_bytes(dir_path, errors='surrogate_or_strict')
if not _is_collection_dir(b_dir_path):
display.warning(
u"Collection at '{path!s}' does not have a {manifest_json!s} "
u'file, nor has it {galaxy_yml!s}: cannot detect version.'.
format(
galaxy_yml=to_text(_GALAXY_YAML),
manifest_json=to_text(_MANIFEST_JSON),
path=to_text(dir_path, errors='surrogate_or_strict'),
),
)
raise ValueError(
'`dir_path` argument must be an installed or a source'
' collection directory.',
)
tmp_inst_req = cls(None, None, dir_path, 'dir', None)
req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req)
req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
return cls(req_name, req_version, dir_path, 'dir', None)
@classmethod
def from_dir_path_implicit( # type: ignore[misc]
cls, # type: Type[Collection]
dir_path, # type: bytes
): # type: (...) -> Collection
"""Construct a collection instance based on an arbitrary dir.
This alternative constructor infers the FQCN based on the parent
and current directory names. It also sets the version to "*"
regardless of whether any of known metadata files are present.
"""
# There is no metadata, but it isn't required for a functional collection. Determine the namespace.name from the path.
u_dir_path = to_text(dir_path, errors='surrogate_or_strict')
path_list = u_dir_path.split(os.path.sep)
req_name = '.'.join(path_list[-2:])
return cls(req_name, '*', dir_path, 'dir', None) # type: ignore[call-arg]
@classmethod
def from_string(cls, collection_input, artifacts_manager, supplemental_signatures):
req = {}
if _is_concrete_artifact_pointer(collection_input):
# Arg is a file path or URL to a collection
req['name'] = collection_input
else:
req['name'], _sep, req['version'] = collection_input.partition(':')
if not req['version']:
del req['version']
req['signatures'] = supplemental_signatures
return cls.from_requirement_dict(req, artifacts_manager)
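    # (Added illustration) from_string() accepts either a concrete artifact
    # pointer such as './ns/coll', kept whole under req['name'], or a
    # Galaxy-style requirement string, e.g. 'community.general:5.0.0' ->
    # name='community.general', version='5.0.0'; with no ':' the version
    # key is omitted and defaults to '*' downstream.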
@classmethod
def from_requirement_dict(cls, collection_req, art_mgr):
req_name = collection_req.get('name', None)
req_version = collection_req.get('version', '*')
req_type = collection_req.get('type')
# TODO: decide how to deprecate the old src API behavior
req_source = collection_req.get('source', None)
req_signature_sources = collection_req.get('signatures', None)
if req_signature_sources is not None:
if art_mgr.keyring is None:
raise AnsibleError(
f"Signatures were provided to verify {req_name} but no keyring was configured."
)
if not isinstance(req_signature_sources, MutableSequence):
req_signature_sources = [req_signature_sources]
req_signature_sources = frozenset(req_signature_sources)
if req_type is None:
if ( # FIXME: decide on the future behavior:
_ALLOW_CONCRETE_POINTER_IN_SOURCE
and req_source is not None
and _is_concrete_artifact_pointer(req_source)
):
src_path = req_source
elif (
req_name is not None
and AnsibleCollectionRef.is_valid_collection_name(req_name)
):
req_type = 'galaxy'
elif (
req_name is not None
and _is_concrete_artifact_pointer(req_name)
):
src_path, req_name = req_name, None
else:
dir_tip_tmpl = ( # NOTE: leading LFs are for concat
'\n\nTip: Make sure you are pointing to the right '
'subdirectory — `{src!s}` looks like a directory '
'but it is neither a collection, nor a namespace '
'dir.'
)
if req_source is not None and os.path.isdir(req_source):
tip = dir_tip_tmpl.format(src=req_source)
elif req_name is not None and os.path.isdir(req_name):
tip = dir_tip_tmpl.format(src=req_name)
elif req_name:
tip = '\n\nCould not find {0}.'.format(req_name)
else:
tip = ''
raise AnsibleError( # NOTE: I'd prefer a ValueError instead
'Neither the collection requirement entry key '
"'name', nor 'source' point to a concrete "
"resolvable collection artifact. Also 'name' is "
'not an FQCN. A valid collection name must be in '
'the format <namespace>.<collection>. Please make '
'sure that the namespace and the collection name '
                    'contain characters from [a-zA-Z0-9_] only.'
'{extra_tip!s}'.format(extra_tip=tip),
)
if req_type is None:
if _is_git_url(src_path):
req_type = 'git'
req_source = src_path
elif _is_http_url(src_path):
req_type = 'url'
req_source = src_path
elif _is_file_path(src_path):
req_type = 'file'
req_source = src_path
elif _is_collection_dir(src_path):
if _is_installed_collection_dir(src_path) and _is_collection_src_dir(src_path):
# Note that ``download`` requires a dir with a ``galaxy.yml`` and fails if it
# doesn't exist, but if a ``MANIFEST.json`` also exists, it would be used
# instead of the ``galaxy.yml``.
raise AnsibleError(
u"Collection requirement at '{path!s}' has both a {manifest_json!s} "
u"file and a {galaxy_yml!s}.\nThe requirement must either be an installed "
u"collection directory or a source collection directory, not both.".
format(
path=to_text(src_path, errors='surrogate_or_strict'),
manifest_json=to_text(_MANIFEST_JSON),
galaxy_yml=to_text(_GALAXY_YAML),
)
)
req_type = 'dir'
req_source = src_path
elif _is_collection_namespace_dir(src_path):
req_name = None # No name for a virtual req or "namespace."?
req_type = 'subdirs'
req_source = src_path
else:
raise AnsibleError( # NOTE: this is never supposed to be hit
'Failed to automatically detect the collection '
'requirement type.',
)
if req_type not in {'file', 'galaxy', 'git', 'url', 'dir', 'subdirs'}:
raise AnsibleError(
"The collection requirement entry key 'type' must be "
'one of file, galaxy, git, dir, subdirs, or url.'
)
if req_name is None and req_type == 'galaxy':
raise AnsibleError(
'Collections requirement entry should contain '
"the key 'name' if it's requested from a Galaxy-like "
'index server.',
)
if req_type != 'galaxy' and req_source is None:
req_source, req_name = req_name, None
if (
req_type == 'galaxy' and
isinstance(req_source, GalaxyAPI) and
not _is_http_url(req_source.api_server)
):
raise AnsibleError(
"Collections requirement 'source' entry should contain "
'a valid Galaxy API URL but it does not: {not_url!s} '
'is not an HTTP URL.'.
format(not_url=req_source.api_server),
)
tmp_inst_req = cls(req_name, req_version, req_source, req_type, req_signature_sources)
if req_type not in {'galaxy', 'subdirs'} and req_name is None:
req_name = art_mgr.get_direct_collection_fqcn(tmp_inst_req) # TODO: fix the cache key in artifacts manager?
if req_type not in {'galaxy', 'subdirs'} and req_version == '*':
req_version = art_mgr.get_direct_collection_version(tmp_inst_req)
return cls(
req_name, req_version,
req_source, req_type,
req_signature_sources,
)
def __repr__(self):
return (
'<{self!s} of type {coll_type!r} from {src!s}>'.
format(self=self, coll_type=self.type, src=self.src or 'Galaxy')
)
def __str__(self):
return to_native(self.__unicode__())
def __unicode__(self):
if self.fqcn is None:
return (
u'"virtual collection Git repo"' if self.is_scm
else u'"virtual collection namespace"'
)
return (
u'{fqcn!s}:{ver!s}'.
format(fqcn=to_text(self.fqcn), ver=to_text(self.ver))
)
@property
def may_have_offline_galaxy_info(self):
if self.fqcn is None:
# Virtual collection
return False
elif not self.is_dir or self.src is None or not _is_collection_dir(self.src):
# Not a dir or isn't on-disk
return False
return True
def construct_galaxy_info_path(self, b_metadata_dir):
if not self.may_have_offline_galaxy_info and not self.type == 'galaxy':
raise TypeError('Only installed collections from a Galaxy server have offline Galaxy info')
# ns.coll-1.0.0.info
b_dir_name = to_bytes(f"{self.namespace}.{self.name}-{self.ver}.info", errors="surrogate_or_strict")
# collections/ansible_collections/ns.coll-1.0.0.info/GALAXY.yml
return os.path.join(b_metadata_dir, b_dir_name, _SOURCE_METADATA_FILE)
def _get_separate_ns_n_name(self): # FIXME: use LRU cache
return self.fqcn.split('.')
@property
def namespace(self):
if self.is_virtual:
raise TypeError('Virtual collections do not have a namespace')
return self._get_separate_ns_n_name()[0]
@property
def name(self):
if self.is_virtual:
raise TypeError('Virtual collections do not have a name')
return self._get_separate_ns_n_name()[-1]
@property
def canonical_package_id(self):
if not self.is_virtual:
return to_native(self.fqcn)
return (
'<virtual namespace from {src!s} of type {src_type!s}>'.
format(src=to_native(self.src), src_type=to_native(self.type))
)
@property
def is_virtual(self):
return self.is_scm or self.is_subdirs
@property
def is_file(self):
return self.type == 'file'
@property
def is_dir(self):
return self.type == 'dir'
@property
def namespace_collection_paths(self):
return [
to_native(path)
for path in _find_collections_in_subdirs(self.src)
]
@property
def is_subdirs(self):
return self.type == 'subdirs'
@property
def is_url(self):
return self.type == 'url'
@property
def is_scm(self):
return self.type == 'git'
@property
def is_concrete_artifact(self):
return self.type in {'git', 'url', 'file', 'dir', 'subdirs'}
@property
def is_online_index_pointer(self):
return not self.is_concrete_artifact
@property
def source_info(self):
return self._source_info
RequirementNamedTuple = namedtuple('Requirement', ('fqcn', 'ver', 'src', 'type', 'signature_sources'))
CandidateNamedTuple = namedtuple('Candidate', ('fqcn', 'ver', 'src', 'type', 'signatures'))
class Requirement(
_ComputedReqKindsMixin,
RequirementNamedTuple,
):
"""An abstract requirement request."""
def __new__(cls, *args, **kwargs):
self = RequirementNamedTuple.__new__(cls, *args, **kwargs)
return self
def __init__(self, *args, **kwargs):
super(Requirement, self).__init__()
class Candidate(
_ComputedReqKindsMixin,
CandidateNamedTuple,
):
"""A concrete collection candidate with its version resolved."""
def __new__(cls, *args, **kwargs):
self = CandidateNamedTuple.__new__(cls, *args, **kwargs)
return self
def __init__(self, *args, **kwargs):
super(Candidate, self).__init__()
|
bcoca/ansible
|
lib/ansible/galaxy/dependency_resolution/dataclasses.py
|
Python
|
gpl-3.0
| 20,285
|
[
"Galaxy"
] |
fa3c308f94de854d8ce63fdd294807aa7bd0375d15b5a2ef34c0790c0d4d6263
|
#!/usr/bin/env python
#
# $File: funcform.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
from simuPOP import InitGenotype, Population
def initGenotype(pop, *args, **kwargs):
InitGenotype(*args, **kwargs).apply(pop)
pop = Population(1000, loci=[2,3])
initGenotype(pop, freq=[.2, .3, .5])
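# (Added note) This mirrors simuPOP's "function form" convention: the wrapper
# applies the InitGenotype operator once to an existing population, rather
# than scheduling it inside an evolutionary run.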
|
BoPeng/simuPOP
|
docs/funcform.py
|
Python
|
gpl-2.0
| 1,286
|
[
"VisIt"
] |
f01664b85abc2abe592d106e1ea5603e0497a0c94d008f2ee07faa76046f9380
|
from __future__ import absolute_import, unicode_literals
from bs4 import BeautifulSoup
from django.test import TestCase
from wagtail.core import hooks
class TestHooks(TestCase):
fixtures = ['test.json']
def test_menus_modify_primed_menu_items(self):
# NOTE: Positional args used to ensure supplied args remain consistent
@hooks.register('menus_modify_primed_menu_items')
def modify_menu_items(
menu_items, request, parent_context, parent_page, menu_instance,
original_menu_instance, menu_tag, original_menu_tag, current_level,
max_levels, current_site, current_page, current_section_root_page,
current_page_ancestor_ids, apply_active_classes,
allow_repeating_parents, use_absolute_page_urls
):
if original_menu_tag == 'main_menu' and current_level == 1:
menu_items.append({
'href': 'https://rkh.co.uk',
'text': 'VISIT RKH.CO.UK',
'active_class': 'external',
})
return menu_items
# Let's render the test homepage to see what happens!
response = self.client.get('/')
# unhook asap to prevent knock-on effects on failure
del hooks._hooks['menus_modify_primed_menu_items']
        # If the hook failed to receive any of the arguments defined
        # on `modify_menu_items` above, there will be an error
self.assertEqual(response.status_code, 200)
# There are 5 main menus being output, and because our hook only adds
# the additional item to the first level of each of those, the
# 'VISIT RKH.CO.UK' text should appear exactly 5 times
self.assertContains(response, 'VISIT RKH.CO.UK', 5)
def test_menus_modify_raw_menu_items(self):
# NOTE: Positional args used to ensure supplied args remain consistent
@hooks.register('menus_modify_raw_menu_items')
def modify_menu_items(
menu_items, request, parent_context, parent_page, menu_instance,
original_menu_instance, menu_tag, original_menu_tag, current_level,
max_levels, current_site, current_page, current_section_root_page,
current_page_ancestor_ids, apply_active_classes,
allow_repeating_parents, use_absolute_page_urls
):
if original_menu_tag == 'section_menu' and current_level == 1:
"""
For the first level of section menus, add a copy of the first
page to the end of the list
"""
try:
menu_items.append(menu_items[0])
                except IndexError:
pass
return menu_items
# Let's render the 'about us' page to see what happens!
response = self.client.get('/about-us/')
# unhook asap to prevent knock-on effects on failure
del hooks._hooks['menus_modify_raw_menu_items']
        # If the hook failed to receive any of the arguments defined
        # on `modify_menu_items` above, there will be an error
self.assertEqual(response.status_code, 200)
# Test output reflects hook changes
soup = BeautifulSoup(response.content, 'html5lib')
section_menu_html = soup.find(id='section-menu-one-level').decode()
        # The hook appends a copy of the first raw item, so 'Meet the team'
        # should now appear twice
expected_html = """
<div id="section-menu-one-level">
<nav class="nav-section" role="navigation">
<a href="/about-us/" class="ancestor section_root">About us</a>
<ul>
<li class="active"><a href="/about-us/">Section home</a></li>
<li class=""><a href="/about-us/meet-the-team/">Meet the team</a></li>
<li class=""><a href="/about-us/our-heritage/">Our heritage</a></li>
<li class=""><a href="/about-us/mission-and-values/">Our mission and values</a></li>
<li class=""><a href="/about-us/meet-the-team/">Meet the team</a></li>
</ul>
</nav>
</div>
"""
self.assertHTMLEqual(section_menu_html, expected_html)
def test_menus_modify_base_page_queryset(self):
# NOTE: Positional args used to ensure supplied args remain consistent
@hooks.register('menus_modify_base_page_queryset')
def modify_page_queryset(
queryset, request, parent_context, parent_page, menu_instance,
original_menu_instance, menu_tag, original_menu_tag, current_level,
max_levels, current_site, current_page, current_section_root_page,
current_page_ancestor_ids, apply_active_classes,
allow_repeating_parents, use_absolute_page_urls
):
"""
Nullify page queryset for 'flat menus'. Should result in only
links to custom urls being rendered.
"""
if menu_tag == 'flat_menu':
queryset = queryset.none()
return queryset
# Let's render the test homepage to see what happens!
response = self.client.get('/')
# unhook asap to prevent knock-on effects on failure
del hooks._hooks['menus_modify_base_page_queryset']
        # If the hook failed to receive any of the arguments defined
        # on `modify_page_queryset` above, there will be an error
self.assertEqual(response.status_code, 200)
# Test output reflects hook changes
soup = BeautifulSoup(response.content, 'html5lib')
contact_menu_html = soup.find(id='nav-contact').decode()
# 'Call us' is a page link, so should no longer appear
expected_html = """
<div id="nav-contact">
<div class="flat-menu contact no_heading">
<ul>
<li class=""><a href="#advisor-chat">Chat to an advisor</a></li>
<li class=""><a href="#request-callback">Request a callback</a></li>
</ul>
</div>
</div>
"""
self.assertHTMLEqual(contact_menu_html, expected_html)
def test_menus_modify_base_menuitem_queryset(self):
# NOTE: Positional args used to ensure supplied args remain consistent
@hooks.register('menus_modify_base_menuitem_queryset')
def modify_menuitem_queryset(
queryset, request, parent_context, parent_page, menu_instance,
original_menu_instance, menu_tag, original_menu_tag, current_level,
max_levels, current_site, current_page, current_section_root_page,
current_page_ancestor_ids, apply_active_classes,
allow_repeating_parents, use_absolute_page_urls
):
"""
Nullify menu items completely for all 'flat menus'. Should result
in completely empty menus
"""
if menu_tag == 'flat_menu':
queryset = queryset.none()
return queryset
# Let's render the test homepage to see what happens!
response = self.client.get('/')
# unhook asap to prevent knock-on effects on failure
del hooks._hooks['menus_modify_base_menuitem_queryset']
        # If the hook failed to receive any of the arguments defined
        # on `modify_menuitem_queryset` above, there will be an error
self.assertEqual(response.status_code, 200)
# Test output reflects hook changes
soup = BeautifulSoup(response.content, 'html5lib')
contact_menu_html = soup.find(id='nav-contact').decode()
# There should be no menu items, so just an empty div (no <ul>)
expected_html = """
<div id="nav-contact">
<div class="flat-menu contact no_heading"></div>
</div>
"""
self.assertHTMLEqual(contact_menu_html, expected_html)
|
rkhleics/wagtailmenus
|
wagtailmenus/tests/test_hooks.py
|
Python
|
mit
| 7,924
|
[
"VisIt"
] |
19566b0850e48f7ecf9c1a5632498bc56d1c07fc1a8af0ebaedd13a5fc756dc2
|
"""
Calculating a custom statistic
==============================
This example shows how to define and use a custom
:class:`iris.analysis.Aggregator`, that provides a new statistical operator for
use with cube aggregation functions such as :meth:`~iris.cube.Cube.collapsed`,
:meth:`~iris.cube.Cube.aggregated_by` or
:meth:`~iris.cube.Cube.rolling_window`.
In this case, we have a 240-year sequence of yearly average surface temperature
over North America, and we want to calculate in how many years these exceed a
certain temperature over a spell of 5 years or more.
"""
import matplotlib.pyplot as plt
import numpy as np
import iris
from iris.analysis import Aggregator
import iris.plot as iplt
import iris.quickplot as qplt
from iris.util import rolling_window
# Define a function to perform the custom statistical operation.
# Note: in order to meet the requirements of iris.analysis.Aggregator, it must
# do the calculation over an arbitrary (given) data axis.
def count_spells(data, threshold, axis, spell_length):
"""
Function to calculate the number of points in a sequence where the value
has exceeded a threshold value for at least a certain number of timepoints.
Generalised to operate on multiple time sequences arranged on a specific
axis of a multidimensional array.
Args:
* data (array):
raw data to be compared with value threshold.
* threshold (float):
threshold point for 'significant' datapoints.
* axis (int):
number of the array dimension mapping the time sequences.
(Can also be negative, e.g. '-1' means last dimension)
* spell_length (int):
number of consecutive times at which value > threshold to "count".
"""
if axis < 0:
# just cope with negative axis numbers
axis += data.ndim
# Threshold the data to find the 'significant' points.
data_hits = data > threshold
# Make an array with data values "windowed" along the time axis.
hit_windows = rolling_window(data_hits, window=spell_length, axis=axis)
# Find the windows "full of True-s" (along the added 'window axis').
full_windows = np.all(hit_windows, axis=axis+1)
# Count points fulfilling the condition (along the time axis).
spell_point_counts = np.sum(full_windows, axis=axis, dtype=int)
return spell_point_counts
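# A quick check of count_spells on a toy series (an added illustration, not
# part of the original example): four consecutive above-threshold points
# contain two full windows of length 3, so two spell points are counted.
#
#     data = np.array([0., 5., 5., 5., 5., 0.])
#     count_spells(data, threshold=1.0, axis=0, spell_length=3)  # -> 2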
def main():
# Enable a future option, to ensure that the netcdf load works the same way
# as in future Iris versions.
iris.FUTURE.netcdf_promote = True
# Load the whole time-sequence as a single cube.
file_path = iris.sample_data_path('E1_north_america.nc')
cube = iris.load_cube(file_path)
# Make an aggregator from the user function.
SPELL_COUNT = Aggregator('spell_count',
count_spells,
units_func=lambda units: 1)
# Define the parameters of the test.
threshold_temperature = 280.0
spell_years = 5
# Calculate the statistic.
warm_periods = cube.collapsed('time', SPELL_COUNT,
threshold=threshold_temperature,
spell_length=spell_years)
warm_periods.rename('Number of 5-year warm spells in 240 years')
# Plot the results.
qplt.contourf(warm_periods, cmap='RdYlBu_r')
plt.gca().coastlines()
iplt.show()
if __name__ == '__main__':
main()
|
SusanJL/iris
|
docs/iris/example_code/General/custom_aggregation.py
|
Python
|
gpl-3.0
| 3,397
|
[
"NetCDF"
] |
9cb321376c0349ebc24cf968b862a13b58035d652408e86c326060d02b8f78f5
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''Molpro 2012 FCIDUMP format.
.. note ::
One- and two-electron integrals are stored in chemists' notation in an
FCIDUMP file while HORTON internally uses Physicist's notation.
'''
import numpy as np
from horton.io.utils import set_four_index_element
__all__ = ['load_fcidump', 'dump_fcidump']
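# For orientation (an added summary of the format handled below): each data
# line in an FCIDUMP file reads
#     value  i  j  k  l
# encoding a two-electron integral (ij|kl) in chemists' notation when k > 0,
# a one-electron integral h_ij when k == l == 0 and i > 0, and the core
# energy when all four indices are zero.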
def load_fcidump(filename):
'''Read one- and two-electron integrals from a Molpro 2012 FCIDUMP file.
Works only for restricted wavefunctions.
Keep in mind that the FCIDUMP format changed in Molpro 2012, so files generated with
older versions are not supported.
Parameters
----------
filename : str
The filename of the fcidump file.
Returns
-------
results : dict
Data loaded from the file, with keys: ``nelec``, ``ms2``, ``one_mo``, ``two_mo``,
``core_energy``.
'''
with open(filename) as f:
# check header
line = next(f)
if not line.startswith(' &FCI NORB='):
raise IOError('Error in FCIDUMP file header')
# read info from header
words = line[5:].split(',')
header_info = {}
for word in words:
if word.count('=') == 1:
key, value = word.split('=')
header_info[key.strip()] = value.strip()
nbasis = int(header_info['NORB'])
nelec = int(header_info['NELEC'])
ms2 = int(header_info['MS2'])
# skip rest of header
for line in f:
words = line.split()
if words[0] == "&END" or words[0] == "/END" or words[0]=="/":
break
# read the integrals
one_mo = np.zeros((nbasis, nbasis))
two_mo = np.zeros((nbasis, nbasis, nbasis, nbasis))
core_energy = 0.0
for line in f:
words = line.split()
if len(words) != 5:
raise IOError('Expecting 5 fields on each data line in FCIDUMP')
value = float(words[0])
if words[3] != '0':
ii = int(words[1])-1
ij = int(words[2])-1
ik = int(words[3])-1
il = int(words[4])-1
# Uncomment the following line if you want to assert that the
# FCIDUMP file does not contain duplicate 4-index entries.
                #assert two_mo[ii, ik, ij, il] == 0.0
set_four_index_element(two_mo, ii, ik, ij, il, value)
elif words[1] != '0':
ii = int(words[1])-1
ij = int(words[2])-1
one_mo[ii, ij] = value
one_mo[ij, ii] = value
else:
core_energy = value
return {
'nelec': nelec,
'ms2': ms2,
'one_mo': one_mo,
'two_mo': two_mo,
'core_energy': core_energy,
}
def dump_fcidump(filename, data):
'''Write one- and two-electron integrals in the Molpro 2012 FCIDUMP format.
Works only for restricted wavefunctions.
Keep in mind that the FCIDUMP format changed in Molpro 2012, so files
    written with this function cannot be used with older versions of Molpro.
    Parameters
    ----------
filename : str
The filename of the FCIDUMP file. This is usually "FCIDUMP".
data : IOData
Must contain ``one_mo``, ``two_mo``. May contain ``core_energy``, ``nelec`` and
``ms``.
'''
with open(filename, 'w') as f:
one_mo = data.one_mo
two_mo = data.two_mo
nactive = one_mo.shape[0]
core_energy = getattr(data, 'core_energy', 0.0)
nelec = getattr(data, 'nelec', 0)
ms2 = getattr(data, 'ms2', 0)
# Write header
print(' &FCI NORB=%i,NELEC=%i,MS2=%i,' % (nactive, nelec, ms2), file=f)
print(' ORBSYM= '+",".join(str(1) for v in range(nactive))+",", file=f)
print(' ISYM=1', file=f)
print(' &END', file=f)
# Write integrals and core energy
for i in range(nactive):
for j in range(i+1):
for k in range(nactive):
for l in range(k+1):
if (i*(i+1))/2+j >= (k*(k+1))/2+l:
value = two_mo[i, k, j, l]
if value != 0.0:
print('%23.16e %4i %4i %4i %4i' % (value, i+1, j+1, k+1, l+1), file=f)
for i in range(nactive):
for j in range(i+1):
value = one_mo[i, j]
if value != 0.0:
print('%23.16e %4i %4i %4i %4i' % (value, i+1, j+1, 0, 0), file=f)
if core_energy != 0.0:
print('%23.16e %4i %4i %4i %4i' % (core_energy, 0, 0, 0, 0), file=f)
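# A minimal round-trip sketch (hypothetical file names and IOData-like object):
#   results = load_fcidump('FCIDUMP')        # dict with one_mo, two_mo, ...
#   dump_fcidump('FCIDUMP.copy', data)       # 'data' must expose one_mo, two_mo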
|
theochem/horton
|
horton/io/molpro.py
|
Python
|
gpl-3.0
| 5,497
|
[
"Molpro"
] |
07da429d104771050babe8c8686bcf9e89b35ba733a22f210d8dd359aed6b292
|
"""HTSeq is a package to process high-throughput sequencing data.
See http://www-huber.embl.de/users/anders/HTSeq for documentation.
"""
import itertools, warnings, os, shlex, re, csv, gzip, sys
try:
from _HTSeq import *
except ImportError:
if os.path.isfile( "setup.py" ):
raise ImportError( "Cannot import 'HTSeq' when working directory is HTSeq's own build directory.")
else:
raise
from _version import __version__
#from vcf_reader import *
#########################
## Utils
#########################
class FileOrSequence( object ):
""" The construcutor takes one argument, which may either be a string,
which is interpreted as a file name (possibly with path), or a
connection, by which we mean a text file opened for reading, or
any other object that can provide an iterator over strings
(lines of the file).
The advantage of passing a file name instead of an already opened file
is that if an iterator is requested several times, the file will be
re-opened each time. If the file is already open, its lines can be read
    only once, after which the iterator stays exhausted.
    Furthermore, if a file name is passed that ends in ".gz" or ".gzip"
(case insensitive), it is transparently gunzipped.
"""
def __init__( self, filename_or_sequence ):
self.fos = filename_or_sequence
self.line_no = None
def __iter__( self ):
self.line_no = 1
if isinstance( self.fos, str ):
if self.fos.lower().endswith( ( ".gz" , ".gzip" ) ):
lines = gzip.open( self.fos )
else:
lines = open( self.fos )
else:
lines = self.fos
for line in lines:
yield line
self.line_no += 1
if isinstance( self.fos, str ):
lines.close()
self.line_no = None
def __repr__( self ):
if isinstance( self.fos, str ):
return "<%s object, connected to file name '%s'>" % (
self.__class__.__name__, self.fos )
else:
return "<%s object, connected to %s >" % (
self.__class__.__name__, repr( self.fos ) )
def get_line_number_string( self ):
if self.line_no is None:
if isinstance( self.fos, str ):
return "file %s closed" % self.fos
else:
return "file closed"
if isinstance( self.fos, str ):
return "line %d of file %s" % ( self.line_no, self.fos )
else:
return "line %d" % self.line_no
#########################
## Features
#########################
class GenomicFeature( object ):
"""A genomic feature, i.e., an interval on a genome with metadata.
At minimum, the following information should be provided by slots:
name: a string identifying the feature (e.g., a gene symbol)
type: a string giving the feature type (e.g., "gene", "exon")
iv: a GenomicInterval object specifying the feature locus
"""
def __init__( self, name, type_, interval ):
self.name = name
self.type = intern( type_ )
self.iv = interval
def __repr__( self ):
return "<%s: %s '%s' at %s: %d -> %d (strand '%s')>" % \
( self.__class__.__name__, self.type, self.name,
self.iv.chrom, self.iv.start_d, self.iv.end_d, self.iv.strand )
def __eq__( self, other ):
if not isinstance( other, GenomicFeature ):
return False
return self.name == other.name and self.type == other.type and \
self.iv == other.iv
    def __ne__( self, other ):
if not isinstance( other, GenomicFeature ):
return True
return not self.__eq__( other )
def get_gff_line( self, with_equal_sign=False ):
try:
source = self.source
except AttributeError:
source = "."
try:
score = self.score
except AttributeError:
score = "."
try:
frame = self.frame
except AttributeError:
frame = "."
try:
attr = self.attr
except AttributeError:
attr = { 'ID': self.name }
if with_equal_sign:
sep = "="
else:
sep = " "
attr_str = '; '.join( [ '%s%s\"%s\"' % ( ak, sep, attr[ak] ) for ak in attr ] )
return "\t".join( str(a) for a in ( self.iv.chrom, source,
self.type, self.iv.start+1, self.iv.end, score,
self.iv.strand, frame, attr_str ) ) + "\n"
_re_attr_main = re.compile( "\s*([^\s\=]+)[\s=]+(.*)" )
_re_attr_empty = re.compile( "^\s*$" )
def parse_GFF_attribute_string( attrStr, extra_return_first_value=False ):
"""Parses a GFF attribute string and returns it as a dictionary.
If 'extra_return_first_value' is set, a pair is returned: the dictionary
and the value of the first attribute. This might be useful if this is the ID.
"""
if attrStr.endswith( "\n" ):
attrStr = attrStr[:-1]
d = {}
first_val = "_unnamed_"
for (i, attr) in itertools.izip( itertools.count(), _HTSeq.quotesafe_split( attrStr ) ):
if _re_attr_empty.match( attr ):
continue
if attr.count( '"' ) not in ( 0, 2 ):
raise ValueError, "The attribute string seems to contain mismatched quotes."
mo = _re_attr_main.match( attr )
if not mo:
raise ValueError, "Failure parsing GFF attribute line"
val = mo.group(2)
if val.startswith( '"' ) and val.endswith( '"' ):
val = val[1:-1]
#val = urllib.unquote( val )
d[ intern(mo.group(1)) ] = intern(val)
if extra_return_first_value and i == 0:
first_val = val
if extra_return_first_value:
return ( d, first_val )
else:
return d
_re_gff_meta_comment = re.compile( "##\s*(\S+)\s+(\S*)" )
class GFF_Reader( FileOrSequence ):
"""Parse a GFF file
Pass the constructor either a file name or an iterator of lines of a
GFF files. If a file name is specified, it may refer to a gzip compressed
file.
Iterating over the object then yields GenomicFeature objects.
"""
def __init__( self, filename_or_sequence, end_included=True ):
FileOrSequence.__init__( self, filename_or_sequence )
self.end_included = end_included
self.metadata = {}
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line == "\n":
continue
if line.startswith( '#' ):
if line.startswith( "##" ):
mo = _re_gff_meta_comment.match( line )
if mo:
self.metadata[ mo.group(1) ] = mo.group(2)
continue
( seqname, source, feature, start, end, score,
strand, frame, attributeStr ) = line.split( "\t", 8 )
( attr, name ) = parse_GFF_attribute_string( attributeStr, True )
if self.end_included:
iv = GenomicInterval( seqname, int(start)-1, int(end), strand )
else:
iv = GenomicInterval( seqname, int(start)-1, int(end)-1, strand )
f = GenomicFeature( name, feature, iv )
if score != ".":
score = float( score )
if frame != ".":
frame = int( frame )
f.source = source
f.score = score
f.frame = frame
f.attr = attr
yield f
def make_feature_dict( feature_sequence ):
"""A feature dict is a convenient way to organize a sequence of Feature
object (which you have got, e.g., from parse_GFF).
The function returns a dict with all the feature types as keys. Each value
of this dict is again a dict, now of feature names. The values of this dict
is a list of feature.
An example makes this clear. Let's say you load the C. elegans GTF file
from Ensemble and make a feature dict:
>>> worm_features_dict = HTSeq.make_feature_dict( HTSeq.parse_GFF(
... "test_data/Caenorhabditis_elegans.WS200.55.gtf.gz" ) )
(This command may take a few minutes to deal with the 430,000 features
in the GTF file. Note that you may need a lot of RAM if you have millions
of features.)
Then, you can simply access, say, exon 0 of gene "F08E10.4" as follows:
>>> worm_features_dict[ 'exon' ][ 'F08E10.4' ][ 0 ]
<GenomicFeature: exon 'F08E10.4' at V: 17479353 -> 17479001 (strand '-')>
"""
res = {}
for f in feature_sequence:
if f.type not in res:
res[ f.type ] = {}
res_ftype = res[ f.type ]
if f.name not in res_ftype:
res_ftype[ f.name ] = [ f ]
else:
res_ftype[ f.name ].append( f )
return res
#########################
## GenomicArray
#########################
def read_chrom_lens( filename, delimiter="\t" ):
    return dict( ( ( chrom, int(length) )
        for chrom, length in csv.reader( open(filename), delimiter=delimiter ) ) )
#########################
## Sequence readers
#########################
_re_fasta_header_line = re.compile( r'>\s*(\S+)\s*(.*)' )
class FastaReader( FileOrSequence ):
"""A Fasta_Reader is associated with a FASTA file or an open connection
to a file-like object with content in FASTA format.
It can generate an iterator over the sequences.
"""
def __iter__( self ):
seq = None
for line in FileOrSequence.__iter__( self ):
if line.startswith( ">" ):
if seq:
s = Sequence( seq, name )
s.descr = descr
yield s
mo = _re_fasta_header_line.match( line )
name = mo.group(1)
descr = mo.group(2)
seq = ""
else:
assert seq is not None, "FASTA file does not start with '>'."
seq += line[:-1]
if seq is not None:
s = Sequence( seq, name )
s.descr = descr
yield s
def get_sequence_lengths( self ):
seqname = None
seqlengths = {}
for line in FileOrSequence.__iter__( self ):
if line.startswith( ">" ):
if seqname is not None:
seqlengths[ seqname ] = length
mo = _re_fasta_header_line.match( line )
seqname = mo.group(1)
length = 0
else:
assert seqname is not None, "FASTA file does not start with '>'."
length += len( line.rstrip() )
if seqname is not None:
seqlengths[ seqname ] = length
return seqlengths
@staticmethod
def _import_pysam():
global pysam
try:
import pysam
except ImportError:
sys.stderr.write( "Please install the 'pysam' package to be able to use the Fasta indexing functionality." )
raise
def build_index( self, force = False ):
self._import_pysam()
if not isinstance( self.fos, str ):
raise TypeError, "This function only works with FastaReader objects " + \
"connected to a fasta file via file name"
index_filename = self.fos + ".fai"
if os.access( index_filename, os.R_OK ):
            if (not force) and os.stat( self.fos ).st_mtime <= \
os.stat( index_filename ).st_mtime:
# index is up to date
return
pysam.faidx( self.fos )
if not os.access( index_filename, os.R_OK ):
raise SystemError, "Building of Fasta index failed due to unknown error."
def __getitem__( self, iv ):
if not isinstance( iv, GenomicInterval ):
raise TypeError, "GenomicInterval expected as key."
if not isinstance( self.fos, str ):
raise TypeError, "This function only works with FastaReader objects " + \
"connected to a fasta file via file name"
self._import_pysam()
fasta = pysam.faidx( self.fos, "%s:%d-%d" % ( iv.chrom, iv.start, iv.end-1 ) )
ans = list( FastaReader( fasta ) )
assert len( ans ) == 1
ans[0].name = str(iv)
if iv.strand != "-":
return ans[0]
else:
return ans[0].get_reverse_complement()
class FastqReader( FileOrSequence ):
"""A Fastq object is associated with a FASTQ self.file. When an iterator
is requested from the object, the FASTQ file is read.
qual_scale is one of "phred", "solexa", "solexa-old".
"""
def __init__( self, file_, qual_scale = "phred" ):
FileOrSequence.__init__( self, file_ )
self.qual_scale = qual_scale
if qual_scale not in ( "phred", "solexa", "solexa-old" ):
raise ValueError, "Illegal quality scale."
def __iter__( self ):
fin = FileOrSequence.__iter__( self )
while True:
id1 = fin.next()
seq = fin.next()
id2 = fin.next()
qual = fin.next()
if qual == "":
if id1 != "":
warnings.warn( "Number of lines in FASTQ file is not "
"a multiple of 4. Discarding the last, "
"incomplete record" )
break
if not qual.endswith( "\n" ):
qual += "\n"
if not id1.startswith( "@" ):
raise ValueError( "Primary ID line in FASTQ file does"
"not start with '@'. Either this is not FASTQ data or the parser got out of sync." )
if not id2.startswith( "+" ):
raise ValueError( "Secondary ID line in FASTQ file does"
"not start with '+'. Maybe got out of sync." )
if len( id2 ) > 2 and id1[1:] != id2[1:]:
raise ValueError( "Primary and secondary ID line in FASTQ"
"disagree." )
yield SequenceWithQualities( seq[:-1], id1[1:-1], qual[:-1],
self.qual_scale )
class BowtieReader( FileOrSequence ):
"""A BowtieFile object is associated with a Bowtie output file that
contains short read alignments. It can generate an iterator of Alignment
objects."""
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
try:
algnt = BowtieAlignment( line )
except ValueError:
if line.startswith( "Reported " ):
continue
warnings.warn( "BowtieReader: Ignoring the following line, which could not be parsed:\n%s\n" % line,
RuntimeWarning )
yield algnt
def bundle_multiple_alignments( sequence_of_alignments ):
"""Some alignment programs, e.g., Bowtie, can output multiple alignments,
i.e., the same read is reported consecutively with different alignments.
This function takes an iterator over alignments and bundles consecutive
    alignments of the same read into a list of Alignment objects and
returns an iterator over these.
"""
alignment_iter = iter( sequence_of_alignments )
algnt = alignment_iter.next()
ma = [ algnt ]
for algnt in alignment_iter:
if algnt.read.name != ma[0].read.name:
yield ma
ma = [ algnt ]
else:
ma.append( algnt )
yield ma
class SolexaExportAlignment( Alignment ):
"""Iterating over SolexaExportReader objects will yield SoelxaExportRecord
objects. These have four fields:
read - a SequenceWithQualities object
aligned - a boolean, indicating whether the object was aligned
iv - a GenomicInterval giving the alignment (or None, if not aligned)
passed_filter - a boolean, indicating whether the object passed the filter
nomatch_code - a code indicating why no match was found (or None, if the
read was aligned)
As long as 'aligned' is True, a SolexaExportRecord can be treated as an
Alignment object.
"""
def __init__( self ):
# Data is filled in by SolexaExportRecord
pass
def __repr__( self ):
if self.aligned:
return "< %s object: Read '%s', aligned to %s >" % (
self.__class__.__name__, self.read.name, self.iv )
else:
return "< %s object: Non-aligned read '%s' >" % (
self.__class__.__name__, self.read.name )
class SolexaExportReader( FileOrSequence ):
"""Parser for *_export.txt files from the SolexaPipeline software.
Iterating over a SolexaExportReader yields SolexaExportRecord objects.
"""
def __init__( self, filename_or_sequence, solexa_old = False ):
FileOrSequence.__init__( self, filename_or_sequence)
if solexa_old:
self.qualscale = "solexa-old"
else:
self.qualscale = "solexa"
@classmethod
def parse_line_bare( dummy, line ):
if line[-1] == "\n":
line = line[:-1]
res = {}
( res['machine'], res['run_number'], res['lane'], res['tile'], res['x_coord'],
res['y_coord'], res['index_string'], res['read_nbr'], res['read_seq'],
res['qual_str'], res['chrom'], res['contig'], res['pos'], res['strand'],
res['match_descr'], res['single_read_algnt_score'],
res['paired_read_algnt_score'], res['partner_chrom'], res['partner_contig'],
res['partner_offset'], res['partner_strand'], res['passed_filtering'] ) \
= line.split( "\t" )
return res
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
record = SolexaExportAlignment()
fields = SolexaExportReader.parse_line_bare( line )
if fields['read_nbr'] != "1":
warnings.warn( "Paired-end read encountered. PE is so far supported only for " +
"SAM files, not yet for SolexaExport. All PE-related fields are ignored. " )
record.read = SequenceWithQualities(
fields['read_seq'],
"%s:%s:%s:%s:%s#0" % (fields['machine'], fields['lane'], fields['tile'],
fields['x_coord'], fields['y_coord'] ),
fields['qual_str'], self.qualscale )
if fields['passed_filtering'] == 'Y':
record.passed_filter = True
elif fields['passed_filtering'] == 'N':
record.passed_filter = False
else:
raise ValueError, "Illegal 'passed filter' value in Solexa export data: '%s'." % fields['passed_filtering']
record.index_string = fields['index_string']
if fields['pos'] == '':
record.iv = None
record.nomatch_code = fields['chrom']
else:
if fields['strand'] == 'F':
strand = '+'
elif fields['strand'] == 'R':
strand = '-'
else:
raise ValueError, "Illegal strand value in Solexa export data."
start = int( fields['pos'] )
chrom = fields['chrom']
if fields['chrom'] == "":
chrom = fields['contig']
record.iv = GenomicInterval( chrom, start,
start + len( fields['read_seq'] ), strand )
yield record
class SAM_Reader( FileOrSequence ):
"""A SAM_Reader object is associated with a SAM file that
contains short read alignments. It can generate an iterator of Alignment
objects."""
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line.startswith( "@" ):
# do something with the header line
continue
try:
algnt = SAM_Alignment.from_SAM_line( line )
except ValueError, e:
e.args = e.args + ( self.get_line_number_string(), )
raise
yield algnt
class GenomicArrayOfSets( GenomicArray ):
"""A GenomicArrayOfSets is a specialization of GenomicArray that allows to store
sets of objects. On construction, the step vectors are initialized with empty sets.
By using the 'add_value' method, objects can be added to intervals. If an object
is already present in the set(s) at this interval, an the new object is added to
the present set, and the set is split if necessary.
"""
def __init__( self, chroms, stranded=True, storage='step', memmap_dir = "", name= "" ):
GenomicArray.__init__( self, chroms, stranded, 'O', storage, memmap_dir, name )
def add_chrom( self, chrom, length = sys.maxint, start_index = 0 ):
GenomicArray.add_chrom( self, chrom, length, start_index )
for cv in self.chrom_vectors[ chrom ].values():
cv[:] = set()
cv.is_vector_of_sets = True
###########################
## paired-end handling
###########################
def pair_SAM_alignments( alignments, bundle=False ):
mate_missing_count = [0]
def process_list( almnt_list ):
while len( almnt_list ) > 0:
a1 = almnt_list.pop( 0 )
# Find its mate
for a2 in almnt_list:
if a1.pe_which == a2.pe_which:
continue
if a1.aligned != a2.mate_aligned or a1.mate_aligned != a2.aligned:
continue
if not (a1.aligned and a2.aligned):
break
if a1.iv.chrom == a2.mate_start.chrom and a1.iv.start == a2.mate_start.pos and \
a2.iv.chrom == a1.mate_start.chrom and a2.iv.start == a1.mate_start.pos:
break
else:
if a1.mate_aligned:
mate_missing_count[0] += 1
if mate_missing_count[0] == 1:
warnings.warn( "Read " + a1.read.name + " claims to have an aligned mate " +
"which could not be found in an adjacent line." )
a2 = None
if a2 is not None:
almnt_list.remove( a2 )
if a1.pe_which == "first":
yield ( a1, a2 )
else:
assert a1.pe_which == "second"
yield ( a2, a1 )
almnt_list = []
current_name = None
for almnt in alignments:
if not almnt.paired_end:
raise ValueError, "'pair_alignments' needs a sequence of paired-end alignments"
if almnt.pe_which == "unknown":
raise ValueError, "Paired-end read found with 'unknown' 'pe_which' status."
if almnt.read.name == current_name:
almnt_list.append( almnt )
else:
if bundle:
yield list( process_list( almnt_list ) )
else:
for p in process_list( almnt_list ):
yield p
current_name = almnt.read.name
almnt_list = [ almnt ]
if bundle:
yield list( process_list( almnt_list ) )
else:
for p in process_list( almnt_list ):
yield p
if mate_missing_count[0] > 1:
warnings.warn( "%d reads with missing mate encountered." % mate_missing_count[0] )
def pair_SAM_alignments_with_buffer( alignments, max_buffer_size=3000000 ):
almnt_buffer = {}
ambiguous_pairing_counter = 0
for almnt in alignments:
if not almnt.paired_end:
raise ValueError, "Sequence of paired-end alignments expected, but got single-end alignment."
if almnt.pe_which == "unknown":
raise ValueError, "Cannot process paired-end alignment found with 'unknown' 'pe_which' status."
matekey = (
almnt.read.name,
"second" if almnt.pe_which == "first" else "first",
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
-almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None )
if matekey in almnt_buffer:
if len( almnt_buffer[ matekey ] ) == 1:
mate = almnt_buffer[ matekey ][ 0 ]
del almnt_buffer[ matekey ]
else:
mate = almnt_buffer[ matekey ].pop( 0 )
if ambiguous_pairing_counter == 0:
                    ambiguous_pairing_first_occurrence = matekey
ambiguous_pairing_counter += 1
if almnt.pe_which == "first":
yield ( almnt, mate )
else:
yield ( mate, almnt )
else:
almntkey = (
almnt.read.name, almnt.pe_which,
almnt.iv.chrom if almnt.aligned else None,
almnt.iv.start if almnt.aligned else None,
almnt.mate_start.chrom if almnt.mate_aligned else None,
almnt.mate_start.pos if almnt.mate_aligned else None,
almnt.inferred_insert_size if almnt.aligned and almnt.mate_aligned else None )
if almntkey not in almnt_buffer:
almnt_buffer[ almntkey ] = [ almnt ]
else:
almnt_buffer[ almntkey ].append( almnt )
if len(almnt_buffer) > max_buffer_size:
raise ValueError, "Maximum alignment buffer size exceeded while pairing SAM alignments."
if len(almnt_buffer) > 0:
warnings.warn( "Mate records missing for %d records; first such record: %s." %
( len(almnt_buffer), str( almnt_buffer.values()[0][0] ) ) )
for almnt_list in almnt_buffer.values():
for almnt in almnt_list:
if almnt.pe_which == "first":
yield ( almnt, None )
else:
yield ( None, almnt )
if ambiguous_pairing_counter > 0:
warnings.warn( "Mate pairing was ambiguous for %d records; mate key for first such record: %s." %
            ( ambiguous_pairing_counter, str( ambiguous_pairing_first_occurrence ) ) )
###########################
## variant calls
###########################
_re_vcf_meta_comment = re.compile( "^##([a-zA-Z]+)\=(.*)$" )
_re_vcf_meta_descr = re.compile('ID=[^,]+,?|Number=[^,]+,?|Type=[^,]+,?|Description="[^"]+",?')
_re_vcf_meta_types = re.compile( "(INFO|FILTER|FORMAT)" )
_vcf_typemap = {
"Integer":int,
"Float":float,
"String":str,
"Flag":bool
}
class VariantCall( object ):
def __init__( self, chrom = None, pos = None, identifier = None, ref = None, alt = None, qual = None, filtr = None, info = None ):
self.chrom = chrom
self.pos = pos
self.id = identifier
self.ref = ref
self.alt = alt
self.qual = qual
self.filter = filtr
self.info = info
self._original_line = None
@classmethod
def fromdict( cls, dictionary ):
ret = cls()
ret.chrom = dictionary["chrom"]
ret.pos = dictionary["pos"]
ret.id = dictionary["id"]
ret.ref = dictionary["ref"]
ret.alt = dictionary["alt"]
ret.qual = dictionary["qual"]
ret.filter = dictionary["filter"]
ret.info = dictionary["info"]
        ret._original_line = None
        return ret
@classmethod
def fromline( cls, line, nsamples = 0, sampleids = [] ):
ret = cls()
if nsamples == 0:
ret.format = None
ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = line.rstrip("\n").split("\t", 7)
else:
lsplit = line.rstrip("\n").split("\t")
ret.chrom, ret.pos, ret.id, ret.ref, ret.alt, ret.qual, ret.filter, ret.info = lsplit[:8]
ret.format = lsplit[8].split(":")
ret.samples = {}
spos=9
for sid in sampleids:
ret.samples[ sid ] = dict( ( name, value ) for (name, value) in itertools.izip( ret.format, lsplit[spos].split(":") ) )
spos += 1
ret.pos = GenomicPosition( ret.chrom, int(ret.pos) )
ret.alt = ret.alt.split(",")
ret._original_line = line
return ret
def infoline( self ):
if self.info.__class__ == dict:
return ";".join(map((lambda key: str(key) + "=" + str(self.info[key])), self.info ))
else:
return self.info
    def get_original_line( self ):
        if self._original_line is None:
            warnings.warn( "Original line is empty; probably this object was created from scratch and not from a line in a .vcf file!" )
        return self._original_line
def sampleline( self ):
if self.format == None:
print >> sys.stderr, "No samples in this variant call!"
return ""
keys = self.format
ret = [ ":".join( keys ) ]
for sid in self.samples:
tmp = []
for k in keys:
if k in self.samples[sid]:
tmp.append( self.samples[sid][k] )
ret.append( ":".join(tmp) )
return "\t".join( ret )
def to_line( self ):
if self.format == None:
return "\t".join( map( str, [ self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join( self.alt ), self.qual, self.filter, self.infoline() ] ) ) + "\n"
else:
return "\t".join( map( str, [ self.pos.chrom, self.pos.pos, self.id, self.ref, ",".join( self.alt ), self.qual, self.filter, self.infoline(), self.sampleline() ] ) ) + "\n"
    def __repr__( self ):
return "<VariantCall at %s, ref '%s', alt %s >" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
def __str__( self ):
return "%s:'%s'->%s" % (str(self.pos).rstrip("/."), self.ref, str(self.alt).strip("[]"))
def unpack_info( self, infodict ):
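        # Split the raw INFO string into key=value tokens; values are cast via
        # 'infodict' where a type is registered, and bare tokens become flags.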
tmp = {}
for token in self.info.strip(";").split(";"):
if re.compile("=").search(token):
token = token.split("=")
if infodict.has_key( token[0] ):
tmp[token[0]] = map( infodict[token[0]], token[1].split(",") )
else:
tmp[token[0]] = token[1].split(",")
if len( tmp[ token[0] ] ) == 1:
tmp[token[0]] = tmp[token[0]][0]
else: #Flag attribute found
tmp[token] = True
diff = set( infodict.keys() ).difference( set( tmp.keys() ) )
for key in diff:
if infodict[key] == bool:
tmp[key] = False
self.info = tmp
class VCF_Reader( FileOrSequence ):
def __init__( self, filename_or_sequence ):
FileOrSequence.__init__( self, filename_or_sequence )
self.metadata = {}
self.info = {}
self.filters = {}
self.formats = {}
self.nsamples = 0
self.sampleids = []
def make_info_dict( self ):
self.infodict = dict( ( key, _vcf_typemap[self.info[key]["Type"]] ) for key in self.info.keys() )
def parse_meta( self, header_filename = None ):
if header_filename == None:
the_iter = FileOrSequence.__iter__( self )
else:
the_iter = open( header_filename, "r" )
for line in the_iter:
if line.startswith( '#' ):
if line.startswith( "##" ):
mo = _re_vcf_meta_comment.match( line )
if mo:
value = mo.group(2)
if mo.group(1) == "INFO":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.info[ key ] = value
elif mo.group(1) == "FILTER":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.filters[ key ] = value
elif mo.group(1) == "FORMAT":
value = dict( e.rstrip(",").split("=",1) for e in _re_vcf_meta_descr.findall(value) )
key = value["ID"]
del value["ID"]
self.formats[ key ] = value
else:
self.metadata[ mo.group(1) ] = mo.group(2)
else:
self.sampleids = line.rstrip("\t\n").split("\t")[9:]
self.nsamples = len( self.sampleids )
continue
else:
break
def meta_info( self, header_filename = None ):
ret = []
if header_filename == None:
the_iter = FileOrSequence.__iter__( self )
else:
the_iter = open( header_filename, "r" )
for line in the_iter:
if line.startswith( '#' ):
ret.append( line )
else:
break
return ret
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line == "\n" or line.startswith( '#' ):
continue
vc = VariantCall.fromline( line, self.nsamples, self.sampleids )
yield vc
class WiggleReader( FileOrSequence ):
def __init__( self, filename_or_sequence, verbose = True ):
FileOrSequence.__init__( self, filename_or_sequence )
self.attributes = {}
self.stepType = 'none'
self.verbose = verbose
def __iter__( self ):
span = 1
pos = None
step = None
chrom = None
for line in FileOrSequence.__iter__( self ):
if line.startswith( 'track' ):
fields = shlex.split(line)[1:]
self.attributes = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
elif line.startswith( 'fixedStep' ): # do fixed step stuff
self.stepType = 'fixed'
fields = shlex.split(line)[1:]
declarations = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
pos = int(declarations['start'])
step = int(declarations['step'])
chrom = declarations['chrom']
if 'span' in declarations:
span = int(declarations['span'])
else:
span = 1
elif line.startswith( 'variableStep' ): # do variable step stuff
self.stepType = 'variable'
fields = shlex.split(line)[1:]
declarations = dict([(p[0], p[1].strip('"')) for p in [x.split("=") for x in fields]])
chrom = declarations['chrom']
if 'span' in declarations:
span = int(declarations['span'])
else:
span = 1
elif line.startswith( 'browser' ) or line.startswith( '#' ): #Comment or ignored
if self.verbose:
print "Ignored line:", line
continue
else:
if self.stepType == 'fixed':
yield ( GenomicInterval( chrom, pos, pos + span, '.' ), float(line.strip()) )
pos += step
elif self.stepType == 'variable':
tmp = line.strip().split(" ")
pos = int(tmp[0])
yield ( GenomicInterval( chrom, pos, pos + span, '.' ), float(tmp[1]) )
class BAM_Reader( object ):
def __init__( self, filename ):
global pysam
self.filename = filename
self.sf = None # This one is only used by __getitem__
self.record_no = -1
try:
import pysam
except ImportError:
sys.stderr.write( "Please Install PySam to use the BAM_Reader Class (http://code.google.com/p/pysam/)" )
raise
def __iter__( self ):
sf = pysam.Samfile(self.filename, "rb")
self.record_no = 0
for pa in sf:
#yield SAM_Alignment.from_pysam_AlignedRead( pa, sf )
yield SAM_Alignment.from_pysam_AlignedSegment( pa, sf )
self.record_no += 1
def fetch( self, reference = None, start = None, end = None, region = None, interval = None ):
if interval: # if a GenomicInterval is given, convert it to a samtools 'region' string format. Rasi added
            region = interval.chrom + ':' + str(interval.start) + '-' + str(interval.end)
sf = pysam.Samfile(self.filename, "rb")
self.record_no = 0
try:
for pa in sf.fetch( reference, start, end, region ):
yield SAM_Alignment.from_pysam_AlignedRead( pa, sf )
self.record_no += 1
except ValueError as e:
if e.message == "fetch called on bamfile without index":
print "Error: ", e.message
print "Your bam index file is missing or wrongly named, convention is that file 'x.bam' has index file 'x.bam.bai'!"
else:
raise
except:
raise
def get_line_number_string( self ):
if self.record_no == -1:
return "unopened file %s" % ( self.filename )
else:
return "record #%d in file %s" % ( self.record_no, self.filename )
def __getitem__( self, iv ):
if not isinstance( iv, GenomicInterval ):
raise TypeError, "Use a HTSeq.GenomicInterval to access regions within .bam-file!"
if self.sf is None:
self.sf = pysam.Samfile( self.filename, "rb" )
if not self.sf._hasIndex():
raise ValueError, "The .bam-file has no index, random-access is disabled!"
for pa in self.sf.fetch( iv.chrom, iv.start+1, iv.end ):
yield SAM_Alignment.from_pysam_AlignedRead( pa, self.sf )
def get_header_dict( self ):
sf = pysam.Samfile(self.filename, "rb")
return sf.header
class BAM_Writer( object ):
def __init__( self, filename, template = None, referencenames = None, referencelengths = None, text = None, header = None ):
try:
import pysam
except ImportError:
sys.stderr.write( "Please Install PySam to use the BAM_Writer Class (http://code.google.com/p/pysam/)" )
raise
self.filename = filename
self.template = template
self.referencenames = referencenames
self.referencelengths = referencelengths
self.text = text
self.header = header
self.sf = pysam.Samfile( self.filename, mode="wb", template = self.template, referencenames = self.referencenames, referencelengths = self.referencelengths, text = self.text, header = self.header )
@classmethod
def from_BAM_Reader( cls, fn, br ):
return BAM_Writer( filename = fn, header = br.get_header_dict() )
def write( self, alnmt):
#self.sf.write( alnmt.to_pysam_AlignedRead( self.sf ) )
self.sf.write( alnmt.to_pysam_AlignedSegment( self.sf ) )
def close( self ):
self.sf.close()
class BED_Reader( FileOrSequence ):
def __init__( self, filename_or_sequence ):
FileOrSequence.__init__( self, filename_or_sequence )
def __iter__( self ):
for line in FileOrSequence.__iter__( self ):
if line.startswith( "track" ):
continue
fields = line.split()
if len(fields) < 3:
raise ValueError, "BED file line contains less than 3 fields"
if len(fields) > 9:
raise ValueError, "BED file line contains more than 9 fields"
iv = GenomicInterval( fields[0], int(fields[1]), int(fields[2]), fields[5] if len(fields) > 5 else "." )
f = GenomicFeature( fields[3] if len(fields) > 3 else "unnamed", "BED line", iv )
f.score = float( fields[4] ) if len(fields) > 4 else None
f.thick = GenomicInterval( iv.chrom, int( fields[6] ), int( fields[7] ), iv.strand ) if len(fields) > 7 else None
f.itemRgb = [ int(a) for a in fields[8].split(",") ] if len(fields) > 8 else None
yield(f)
|
rasilab/htseq
|
HTSeq/__init__.py
|
Python
|
gpl-3.0
| 39,493
|
[
"Bowtie",
"HTSeq",
"pysam"
] |
4dd731709d9c6e8d8cb64967bd29391cc1719f426d4ee7e4b748582c7de67350
|
"""Utilities to assist with commerce tasks."""
import json
import logging
from urllib.parse import urlencode, urljoin
import requests
import waffle # lint-amnesty, pylint: disable=invalid-django-waffle-import
from django.conf import settings
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.utils.translation import gettext as _
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.models import CourseEnrollment # lint-amnesty, pylint: disable=unused-import
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client, is_commerce_service_configured
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming import helpers as theming_helpers
from .models import CommerceConfiguration
log = logging.getLogger(__name__)
def is_account_activation_requirement_disabled():
"""
Checks to see if the django-waffle switch for disabling the account activation requirement is active
Returns:
Boolean value representing switch status
"""
switch_name = configuration_helpers.get_value(
'DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH',
settings.DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH
)
return waffle.switch_is_active(switch_name)
class EcommerceService:
""" Helper class for ecommerce service integration. """
def __init__(self):
self.config = CommerceConfiguration.current()
@property
def ecommerce_url_root(self):
""" Retrieve Ecommerce service public url root. """
return configuration_helpers.get_value('ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT)
def get_absolute_ecommerce_url(self, ecommerce_page_url):
""" Return the absolute URL to the ecommerce page.
Args:
ecommerce_page_url (str): Relative path to the ecommerce page.
Returns:
Absolute path to the ecommerce page.
"""
return urljoin(self.ecommerce_url_root, ecommerce_page_url)
def get_order_dashboard_url(self):
""" Return the URL to the ecommerce dashboard orders page.
Returns:
String: order dashboard url.
"""
return self.get_absolute_ecommerce_url(CommerceConfiguration.DEFAULT_ORDER_DASHBOARD_URL)
def get_receipt_page_url(self, order_number):
"""
Gets the URL for the Order Receipt page hosted by the ecommerce service.
Args:
order_number (str): Order number.
Returns:
Receipt page for the specified Order.
"""
return self.get_absolute_ecommerce_url(CommerceConfiguration.DEFAULT_RECEIPT_PAGE_URL + order_number)
def is_enabled(self, user):
"""
Determines the availability of the EcommerceService based on user activation and service configuration.
Note: If the user is anonymous we bypass the user activation gate and only look at the service config.
Returns:
Boolean
"""
user_is_active = user.is_active or is_account_activation_requirement_disabled()
allow_user = user_is_active or user.is_anonymous
return allow_user and self.config.checkout_on_ecommerce_service
def payment_page_url(self):
""" Return the URL for the checkout page.
Example:
http://localhost:8002/basket/add/
"""
return self.get_absolute_ecommerce_url(self.config.basket_checkout_page)
def get_checkout_page_url(self, *skus, **kwargs):
""" Construct the URL to the ecommerce checkout page and include products.
Args:
skus (list): List of SKUs associated with products to be added to basket
program_uuid (string): The UUID of the program, if applicable
Returns:
Absolute path to the ecommerce checkout page showing basket that contains specified products.
Example:
http://localhost:8002/basket/add/?sku=5H3HG5&sku=57FHHD
http://localhost:8002/basket/add/?sku=5H3HG5&sku=57FHHD&bundle=3bdf1dd1-49be-4a15-9145-38901f578c5a
"""
program_uuid = kwargs.get('program_uuid')
enterprise_catalog_uuid = kwargs.get('catalog')
query_params = {'sku': skus}
if enterprise_catalog_uuid:
query_params.update({'catalog': enterprise_catalog_uuid})
url = '{checkout_page_path}?{query_params}'.format(
checkout_page_path=self.get_absolute_ecommerce_url(self.config.basket_checkout_page),
query_params=urlencode(query_params, doseq=True),
)
if program_uuid:
url = '{url}&bundle={program_uuid}'.format(
url=url,
program_uuid=program_uuid
)
return url
def upgrade_url(self, user, course_key):
"""
Returns the URL for the user to upgrade, or None if not applicable.
"""
verified_mode = CourseMode.verified_mode_for_course(course_key)
if verified_mode:
if self.is_enabled(user):
return self.get_checkout_page_url(verified_mode.sku)
else:
return reverse('dashboard')
return None
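# A minimal usage sketch (hypothetical user and course key), assuming a verified
# mode exists for the course:
#   EcommerceService().upgrade_url(user, course_key)
#   returns the checkout URL carrying the verified mode's SKU, or the LMS
#   dashboard URL when ecommerce checkout is disabled for this user.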
def refund_entitlement(course_entitlement):
"""
    Attempt a refund of a course entitlement. The caller is responsible for
    verifying the user before invoking this method.
Returns:
bool: True if the Refund is successfully processed.
"""
user_model = get_user_model()
enrollee = course_entitlement.user
entitlement_uuid = str(course_entitlement.uuid)
if not is_commerce_service_configured():
log.error(
'Ecommerce service is not configured, cannot refund for user [%s], course entitlement [%s].',
enrollee.id,
entitlement_uuid
)
return False
service_user = user_model.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
api_client = ecommerce_api_client(service_user)
log.info(
'Attempting to create a refund for user [%s], course entitlement [%s]...',
enrollee.id,
entitlement_uuid
)
try:
refund_ids = api_client.refunds.post(
{
'order_number': course_entitlement.order_number,
'username': enrollee.username,
'entitlement_uuid': entitlement_uuid,
}
)
except Exception as exc: # pylint: disable=broad-except
# Catch any possible exceptions from the Ecommerce service to ensure we fail gracefully
log.exception(
"Unexpected exception while attempting to initiate refund for user [%s], "
"course entitlement [%s] message: [%s]",
enrollee.id,
course_entitlement.uuid,
str(exc)
)
return False
if refund_ids:
log.info(
'Refund successfully opened for user [%s], course entitlement [%s]: %r',
enrollee.id,
entitlement_uuid,
refund_ids,
)
return _process_refund(
refund_ids=refund_ids,
api_client=api_client,
mode=course_entitlement.mode,
user=enrollee,
always_notify=True,
)
else:
log.warning('No refund opened for user [%s], course entitlement [%s]', enrollee.id, entitlement_uuid)
return False
def refund_seat(course_enrollment, change_mode=False):
"""
Attempt to initiate a refund for any orders associated with the seat being unenrolled,
using the commerce service.
Arguments:
course_enrollment (CourseEnrollment): a student enrollment
change_mode (Boolean): change the course mode to free mode or not
Returns:
A list of the external service's IDs for any refunds that were initiated
(may be empty).
Raises:
exceptions.SlumberBaseException: for any unhandled HTTP error during communication with the E-Commerce Service.
exceptions.Timeout: if the attempt to reach the commerce service timed out.
"""
User = get_user_model() # pylint:disable=invalid-name
course_key_str = str(course_enrollment.course_id)
enrollee = course_enrollment.user
service_user = User.objects.get(username=settings.ECOMMERCE_SERVICE_WORKER_USERNAME)
api_client = ecommerce_api_client(service_user)
log.info('Attempting to create a refund for user [%s], course [%s]...', enrollee.id, course_key_str)
refund_ids = api_client.refunds.post({'course_id': course_key_str, 'username': enrollee.username})
if refund_ids:
log.info('Refund successfully opened for user [%s], course [%s]: %r', enrollee.id, course_key_str, refund_ids)
_process_refund(
refund_ids=refund_ids,
api_client=api_client,
mode=course_enrollment.mode,
user=enrollee,
)
if change_mode and CourseMode.can_auto_enroll(course_id=CourseKey.from_string(course_key_str)):
course_enrollment.update_enrollment(mode=CourseMode.auto_enroll_mode(course_id=course_key_str),
is_active=False, skip_refund=True)
course_enrollment.save()
else:
log.info('No refund opened for user [%s], course [%s]', enrollee.id, course_key_str)
return refund_ids
def _process_refund(refund_ids, api_client, mode, user, always_notify=False):
"""
Helper method to process a refund for a given course_product. This method assumes that the User has already
been unenrolled.
Arguments:
refund_ids: List of refund ids to be processed
api_client: The API Client used in the processing of refunds
mode: The mode that the refund should be processed for
user: The user that the refund is being processed for
always_notify (bool): This will enable always notifying support with Zendesk tickets when
an approval is required
Returns:
bool: True if the refund process was successful, False if there are any Errors that are not handled
"""
config = CommerceConfiguration.current()
if config.enable_automatic_refund_approval:
refunds_requiring_approval = []
for refund_id in refund_ids:
try:
# NOTE: The following assumes that the user has already been unenrolled.
# We are then able to approve payment. Additionally, this ensures we don't tie up an
# additional web worker when the E-Commerce Service tries to unenroll the learner.
api_client.refunds(refund_id).process.put({'action': 'approve_payment_only'})
log.info('Refund [%d] successfully approved.', refund_id)
except: # pylint: disable=bare-except
# Push the refund to Support to process
log.exception('Failed to automatically approve refund [%d]!', refund_id)
refunds_requiring_approval.append(refund_id)
else:
refunds_requiring_approval = refund_ids
if refunds_requiring_approval:
# XCOM-371: this is a temporary measure to suppress refund-related email
# notifications to students and support for free enrollments. This
# condition should be removed when the CourseEnrollment.refundable() logic
# is updated to be more correct, or when we implement better handling (and
# notifications) in Otto for handling reversal of $0 transactions.
if mode != 'verified' and not always_notify:
# 'verified' is the only enrollment mode that should presently
# result in opening a refund request.
log.info(
'Skipping refund support notification for non-verified mode for user [%s], mode: [%s]',
user.id,
mode,
)
else:
try:
return _send_refund_notification(user, refunds_requiring_approval)
except: # pylint: disable=bare-except
# Unable to send notification to Support, do not break as this method is used by Signals
log.warning('Could not send support notification for refund.', exc_info=True)
return False
return True
def _send_refund_notification(user, refund_ids):
"""
Notify the support team of the refund request.
Returns:
bool: True if we are able to send the notification. In this case that means we were able to create
a ZenDesk ticket
"""
tags = ['auto_refund']
if theming_helpers.is_request_in_themed_site():
# this is not presently supported with the external service.
raise NotImplementedError("Unable to send refund processing emails to support teams.")
# Build the information for the ZenDesk ticket
student = user
subject = _("[Refund] User-Requested Refund")
body = _generate_refund_notification_body(student, refund_ids)
requester_name = student.profile.name or student.username
return create_zendesk_ticket(requester_name, student.email, subject, body, tags)
def _generate_refund_notification_body(student, refund_ids):
""" Returns a refund notification message body. """
msg = _(
'A refund request has been initiated for {username} ({email}). '
'To process this request, please visit the link(s) below.'
).format(username=student.username, email=student.email)
ecommerce_url_root = configuration_helpers.get_value(
'ECOMMERCE_PUBLIC_URL_ROOT', settings.ECOMMERCE_PUBLIC_URL_ROOT,
)
refund_urls = [urljoin(ecommerce_url_root, f'/dashboard/refunds/{refund_id}/')
for refund_id in refund_ids]
# emails contained in this message could contain unicode characters so encode as such
return '{msg}\n\n{urls}'.format(msg=msg, urls='\n'.join(refund_urls))
def create_zendesk_ticket(requester_name, requester_email, subject, body, tags=None):
"""
Create a Zendesk ticket via API.
Returns:
bool: False if we are unable to create the ticket for any reason
"""
if not (settings.ZENDESK_URL and settings.ZENDESK_USER and settings.ZENDESK_API_KEY):
log.error('Zendesk is not configured. Cannot create a ticket.')
return False
# Copy the tags to avoid modifying the original list.
tags = set(tags or [])
tags.add('LMS')
tags = list(tags)
data = {
'ticket': {
'requester': {
'name': requester_name,
'email': str(requester_email)
},
'subject': subject,
'comment': {'body': body},
'tags': tags
}
}
# Encode the data to create a JSON payload
payload = json.dumps(data)
# Set the request parameters
url = urljoin(settings.ZENDESK_URL, '/api/v2/tickets.json')
user = f'{settings.ZENDESK_USER}/token'
pwd = settings.ZENDESK_API_KEY
headers = {'content-type': 'application/json'}
try:
response = requests.post(url, data=payload, auth=(user, pwd), headers=headers)
# Check for HTTP codes other than 201 (Created)
if response.status_code != 201:
log.error('Failed to create ticket. Status: [%d], Body: [%s]', response.status_code, response.content)
return False
else:
log.debug('Successfully created ticket.')
except Exception: # pylint: disable=broad-except
log.exception('Failed to create ticket.')
return False
return True
|
edx/edx-platform
|
lms/djangoapps/commerce/utils.py
|
Python
|
agpl-3.0
| 15,662
|
[
"VisIt"
] |
6a13d6b72f0f863bddbee0c16090e59496c43d0816be6d6c033b47a48a82dd2d
|
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from os import path
from unittest import mock
import fixtures
import snapcraft
from snapcraft.plugins import gulp, nodejs
from snapcraft import tests
class GulpPluginTestCase(tests.TestCase):
def setUp(self):
super().setUp()
self.project_options = snapcraft.ProjectOptions()
patcher = mock.patch('snapcraft.internal.common.run')
self.run_mock = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('snapcraft.sources.Tar')
self.tar_mock = patcher.start()
self.addCleanup(patcher.stop)
patcher = mock.patch('sys.stdout')
patcher.start()
self.addCleanup(patcher.stop)
def test_pull_local_sources(self):
class Options:
source = '.'
gulp_tasks = []
node_engine = '4'
plugin = gulp.GulpPlugin('test-part', Options(), self.project_options)
os.makedirs(plugin.sourcedir)
plugin.pull()
self.assertFalse(self.run_mock.called, 'run() was called')
self.tar_mock.assert_has_calls([
mock.call(
nodejs.get_nodejs_release(plugin.options.node_engine),
path.join(os.path.abspath('.'), 'parts', 'test-part', 'npm')),
mock.call().download()])
def test_build(self):
self.useFixture(tests.fixture_setup.CleanEnvironment())
self.useFixture(fixtures.EnvironmentVariable(
'PATH', '/bin'))
class Options:
source = '.'
gulp_tasks = []
node_engine = '4'
plugin = gulp.GulpPlugin('test-part', Options(), self.project_options)
os.makedirs(plugin.sourcedir)
open(os.path.join(plugin.sourcedir, 'package.json'), 'w').close()
plugin.build()
path = '{}:/bin'.format(os.path.join(plugin._npm_dir, 'bin'))
self.run_mock.assert_has_calls([
mock.call(['npm', 'install', '-g', 'gulp-cli'],
cwd=plugin.builddir,
env={'PATH': path,
'NPM_CONFIG_PREFIX': plugin._npm_dir}),
mock.call(['npm', 'install', '--only-development'],
cwd=plugin.builddir,
env={'PATH': path,
'NPM_CONFIG_PREFIX': plugin._npm_dir}),
])
self.tar_mock.assert_has_calls([
mock.call(
nodejs.get_nodejs_release(plugin.options.node_engine),
os.path.join(plugin._npm_dir)),
mock.call().provision(
plugin._npm_dir, clean_target=False, keep_tarball=True)])
@mock.patch('platform.machine')
def test_unsupported_arch_raises_exception(self, machine_mock):
machine_mock.return_value = 'fantasy-arch'
class Options:
source = None
gulp_tasks = []
node_engine = '4'
with self.assertRaises(EnvironmentError) as raised:
gulp.GulpPlugin('test-part', Options(), self.project_options)
self.assertEqual(raised.exception.__str__(),
'architecture not supported (fantasy-arch)')
def test_schema(self):
self.maxDiff = None
plugin_schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'additionalProperties': False,
'properties': {
'gulp-tasks': {'default': [],
'items': {'type': 'string'},
'minitems': 1,
'type': 'array',
'uniqueItems': True},
'node-engine': {'default': '4.4.4', 'type': 'string'},
'source': {'type': 'string'},
'source-branch': {'default': '', 'type': 'string'},
'source-subdir': {'default': None, 'type': 'string'},
'source-tag': {'default': '', 'type:': 'string'},
'source-type': {'default': '', 'type': 'string'},
'source-depth': {'default': 0, 'type': 'integer'},
'disable-parallel': {'default': False, 'type': 'boolean'}},
'pull-properties': ['source', 'source-type', 'source-branch',
'source-tag', 'source-subdir', 'node-engine'],
'build-properties': ['disable-parallel', 'gulp-tasks'],
'required': ['source', 'gulp-tasks'],
'type': 'object'}
self.assertEqual(gulp.GulpPlugin.schema(), plugin_schema)
def test_clean_pull_step(self):
class Options:
source = '.'
gulp_tasks = []
node_engine = '4'
plugin = gulp.GulpPlugin('test-part', Options(), self.project_options)
os.makedirs(plugin.sourcedir)
plugin.pull()
self.assertTrue(os.path.exists(plugin._npm_dir))
plugin.clean_pull()
self.assertFalse(os.path.exists(plugin._npm_dir))
|
dholbach/snapcraft
|
snapcraft/tests/test_plugin_gulp.py
|
Python
|
gpl-3.0
| 5,630
|
[
"GULP"
] |
987f618bbdd079c4f4090a606a928cb6b953694dae90d04581e2c2606ccc18fc
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from string import ascii_letters, digits
from ansible.compat.six import string_types
from ansible.compat.six.moves import configparser
from ansible.parsing.quoting import unquote
from ansible.errors import AnsibleOptionsError
# copied from utils, avoid circular reference fun :)
def mk_boolean(value):
if value is None:
return False
val = str(value)
if val.lower() in [ "true", "t", "y", "1", "yes" ]:
return True
else:
return False
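# e.g. mk_boolean("Yes") -> True, mk_boolean("0") -> False, mk_boolean(None) -> False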
def shell_expand(path):
'''
shell_expand is needed as os.path.expanduser does not work
when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE
'''
if path:
path = os.path.expanduser(os.path.expandvars(path))
return path
def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False, isnone=False, ispath=False):
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
value = mk_boolean(value)
if value:
if integer:
value = int(value)
elif floating:
value = float(value)
elif islist:
if isinstance(value, string_types):
value = [x.strip() for x in value.split(',')]
elif isnone:
if value == "None":
value = None
elif ispath:
value = shell_expand(value)
elif isinstance(value, string_types):
value = unquote(value)
return value
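# Lookup precedence illustrated (hypothetical values): the environment variable
# wins over the config file, which wins over the hard-coded default, e.g. for
#   DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
#   ANSIBLE_FORKS=20 in the environment       -> 20
#   forks = 10 under [defaults] in the file   -> 10
#   neither set                               -> 5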
def _get_config(p, section, key, env_var, default):
''' helper function for get_config '''
if env_var is not None:
value = os.environ.get(env_var, None)
if value is not None:
return value
if p is not None:
try:
return p.get(section, key, raw=True)
except:
return default
return default
def load_config_file():
    ''' Load config file, in order (first found is used): ENV, CWD, HOME, /etc/ansible '''
p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
path0 = os.path.expanduser(path0)
if os.path.isdir(path0):
path0 += "/ansible.cfg"
path1 = os.getcwd() + "/ansible.cfg"
path2 = os.path.expanduser("~/.ansible.cfg")
path3 = "/etc/ansible/ansible.cfg"
for path in [path0, path1, path2, path3]:
if path is not None and os.path.exists(path):
try:
p.read(path)
except configparser.Error as e:
raise AnsibleOptionsError("Error reading config file: \n{0}".format(e))
return p, path
return None, ''
p, CONFIG_FILE = load_config_file()
# check all of these extensions when looking for yaml files for things like
# group variables -- really anything we can load
YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
# the default whitelist for cow stencils
DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'dragon', 'elephant-in-snake', 'elephant',
'eyes', 'hellokitty', 'kitty', 'luke-koala', 'meow', 'milk', 'moofasa', 'moose', 'ren', 'sheep',
'small', 'stegosaurus', 'stimpy', 'supermilker', 'three-eyes', 'turkey', 'turtle', 'tux', 'udder',
'vader-koala', 'vader', 'www',]
# sections in config file
DEFAULTS='defaults'
# FIXME: add deprecation warning when these get set
#### DEPRECATED VARS ####
# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
# this is not used since 0.5 but people might still have in config
DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None, ispath=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
DEFAULT_VAULT_PASSWORD_FILE = get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None, ispath=True)
DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
DEFAULT_SFTP_BATCH_MODE = get_config(p, 'ssh_connection', 'sftp_batch_mode', 'ANSIBLE_SFTP_BATCH_MODE', True, boolean=True)
DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
DEFAULT_PRIVATE_ROLE_VARS = get_config(p, DEFAULTS, 'private_role_vars', 'ANSIBLE_PRIVATE_ROLE_VARS', False, boolean=True)
DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', ispath=True)
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], islist=True)
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, integer=True)
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True)
DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True)
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
### PRIVILEGE ESCALATION ###
# Backwards Compat
DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', None)
DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', None)
DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', None)
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H -S -n')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
# Become
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Permission denied'} #FIXME: deal with i18n
BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'runas': '', 'doas': 'Authorization required'} #FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas','doas']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
DEFAULT_BECOME_FLAGS = get_config(p, 'privilege_escalation', 'become_flags', 'ANSIBLE_BECOME_FLAGS', None)
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# PLUGINS
# Modules that can optimize with_items loops into a single call. Currently
# such a module must take a "name" or "pkg" parameter that accepts a list;
# if the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True)
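# Illustration (not part of the original file): with squashing, a task such as
#   - yum: name={{ item }} state=present
#     with_items: [httpd, mariadb-server, php]
# runs as a single yum invocation with name=[httpd, mariadb-server, php]
# rather than one module call per item.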
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True)
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True)
DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', ispath=True)
DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', ispath=True)
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', ispath=True)
DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', ispath=True)
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', ispath=True)
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', ispath=True)
DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', ispath=True)
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
# cache
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
# Display
ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
ANSIBLE_COW_SELECTION = get_config(p, DEFAULTS, 'cow_selection', 'ANSIBLE_COW_SELECTION', 'default')
ANSIBLE_COW_WHITELIST = get_config(p, DEFAULTS, 'cow_whitelist', 'ANSIBLE_COW_WHITELIST', DEFAULT_COW_WHITELIST, islist=True)
DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', True, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
DEFAULT_CALLBACK_WHITELIST = get_config(p, DEFAULTS, 'callback_whitelist', 'ANSIBLE_CALLBACK_WHITELIST', [], islist=True)
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/', ispath=True)
DEFAULT_NULL_REPRESENTATION = get_config(p, DEFAULTS, 'null_representation', 'ANSIBLE_NULL_REPRESENTATION', None, isnone=True)
# CONNECTION RELATED
ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', '-o ControlMaster=auto -o ControlPersist=60s')
ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
ANSIBLE_SSH_RETRIES = get_config(p, 'ssh_connection', 'retries', 'ANSIBLE_SSH_RETRIES', 0, integer=True)
PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
# obsolete -- will be formally removed
ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True)
# this can be configured to blacklist SCMs, but new ones cannot be added unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True )
# colors
COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
COLOR_CHANGED = get_config(p, 'colors', 'changed', 'ANSIBLE_COLOR_CHANGED', 'yellow')
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
MODULE_NO_JSON = ['command', 'shell', 'raw']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
DEFAULT_SUBSET = None
DEFAULT_SU_PASS = None
VAULT_VERSION_MIN = 1.0
VAULT_VERSION_MAX = 1.0
MAX_FILE_SIZE_FOR_DIFF = 1*1024*1024
TREE_DIR = None
LOCALHOST = frozenset(['127.0.0.1', 'localhost', '::1'])
|
domibarton/ansible
|
lib/ansible/constants.py
|
Python
|
gpl-3.0
| 20,345
|
[
"Galaxy",
"MOOSE"
] |
9435a139eef702f7e3847ab5c94835ed572a044ac95bf8841981659ab0aff2a3
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import pytest
import re
import shutil
import stat
import tarfile
import yaml
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
import ansible.module_utils.six.moves.urllib.error as urllib_error
from ansible import context
from ansible.cli.galaxy import GalaxyCLI
from ansible.errors import AnsibleError
from ansible.galaxy import collection, api
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.utils import context_objects as co
from ansible.utils.display import Display
def call_galaxy_cli(args):
orig = co.GlobalCLIArgs._Singleton__instance
co.GlobalCLIArgs._Singleton__instance = None
try:
GalaxyCLI(args=['ansible-galaxy', 'collection'] + args).run()
finally:
co.GlobalCLIArgs._Singleton__instance = orig
def artifact_json(namespace, name, version, dependencies, server):
json_str = json.dumps({
'artifact': {
'filename': '%s-%s-%s.tar.gz' % (namespace, name, version),
'sha256': '2d76f3b8c4bab1072848107fb3914c345f71a12a1722f25c08f5d3f51f4ab5fd',
'size': 1234,
},
'download_url': '%s/download/%s-%s-%s.tar.gz' % (server, namespace, name, version),
'metadata': {
'namespace': namespace,
'name': name,
'dependencies': dependencies,
},
'version': version
})
return to_text(json_str)
def artifact_versions_json(namespace, name, versions, galaxy_api, available_api_versions=None):
results = []
available_api_versions = available_api_versions or {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
for version in versions:
results.append({
'href': '%s/api/%s/%s/%s/versions/%s/' % (galaxy_api.api_server, api_version, namespace, name, version),
'version': version,
})
if api_version == 'v2':
json_str = json.dumps({
'count': len(versions),
'next': None,
'previous': None,
'results': results
})
if api_version == 'v3':
response = {'meta': {'count': len(versions)},
'data': results,
'links': {'first': None,
'last': None,
'next': None,
'previous': None},
}
json_str = json.dumps(response)
return to_text(json_str)
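# Example (hypothetical values, not part of the original file): for a server
# advertising v3, the helper nests the results under 'data' with pagination
# 'links', while v2 uses 'results' plus count/next/previous, e.g.
#   artifact_versions_json('ns', 'col', ['1.0.0'], galaxy_api, {'v3': 'v3/'})
# returns '{"meta": {"count": 1}, "data": [...], "links": {...}}'.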
def error_json(galaxy_api, errors_to_return=None, available_api_versions=None):
errors_to_return = errors_to_return or []
available_api_versions = available_api_versions or {}
response = {}
api_version = 'v2'
if 'v3' in available_api_versions:
api_version = 'v3'
if api_version == 'v2':
assert len(errors_to_return) <= 1
if errors_to_return:
response = errors_to_return[0]
if api_version == 'v3':
response['errors'] = errors_to_return
json_str = json.dumps(response)
return to_text(json_str)
@pytest.fixture(autouse=True)
def reset_cli_args():
co.GlobalCLIArgs._Singleton__instance = None
yield
co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(request, tmp_path_factory):
test_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
namespace = 'ansible_namespace'
collection = 'collection'
skeleton_path = os.path.join(os.path.dirname(os.path.split(__file__)[0]), 'cli', 'test_data', 'collection_skeleton')
collection_path = os.path.join(test_dir, namespace, collection)
call_galaxy_cli(['init', '%s.%s' % (namespace, collection), '-c', '--init-path', test_dir,
'--collection-skeleton', skeleton_path])
dependencies = getattr(request, 'param', None)
if dependencies:
galaxy_yml = os.path.join(collection_path, 'galaxy.yml')
with open(galaxy_yml, 'rb+') as galaxy_obj:
existing_yaml = yaml.safe_load(galaxy_obj)
existing_yaml['dependencies'] = dependencies
galaxy_obj.seek(0)
galaxy_obj.write(to_bytes(yaml.safe_dump(existing_yaml)))
galaxy_obj.truncate()
# Create a file with +x in the collection so we can test the permissions
execute_path = os.path.join(collection_path, 'runme.sh')
with open(execute_path, mode='wb') as fd:
fd.write(b"echo hi")
os.chmod(execute_path, os.stat(execute_path).st_mode | stat.S_IEXEC)
call_galaxy_cli(['build', collection_path, '--output-path', test_dir])
collection_tar = os.path.join(test_dir, '%s-%s-0.1.0.tar.gz' % (namespace, collection))
return to_bytes(collection_path), to_bytes(collection_tar)
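# Note (not part of the original file): tests can inject extra dependencies
# into the generated galaxy.yml by parametrizing this fixture indirectly, e.g.
#   @pytest.mark.parametrize('collection_artifact',
#                            [{'ansible_namespace.collection': '>=0.0.1'}],
#                            indirect=True)
# as test_install_collection_with_circular_dependency does below.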
@pytest.fixture()
def galaxy_server():
context.CLIARGS._store = {'ignore_certs': False}
galaxy_api = api.GalaxyAPI(None, 'test_server', 'https://galaxy.ansible.com')
return galaxy_api
def test_build_requirement_from_path(collection_artifact):
actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.b_path == collection_artifact[0]
assert actual.api is None
assert actual.skip is True
assert actual.versions == set([u'*'])
assert actual.latest_version == u'*'
assert actual.dependencies == {}
@pytest.mark.parametrize('version', ['1.1.1', '1.1.0', '1.0.0'])
def test_build_requirement_from_path_with_manifest(version, collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': version,
'dependencies': {
'ansible_namespace.collection': '*'
}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.b_path == collection_artifact[0]
assert actual.api is None
assert actual.skip is True
assert actual.versions == set([to_text(version)])
assert actual.latest_version == to_text(version)
assert actual.dependencies == {'ansible_namespace.collection': '*'}
def test_build_requirement_from_path_invalid_manifest(collection_artifact):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(b"not json")
expected = "Collection file at '%s' does not contain a valid json string." % to_native(manifest_path)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_path(collection_artifact[0], True)
def test_build_requirement_from_path_no_version(collection_artifact, monkeypatch):
manifest_path = os.path.join(collection_artifact[0], b'MANIFEST.json')
manifest_value = json.dumps({
'collection_info': {
'namespace': 'namespace',
'name': 'name',
'version': '',
'dependencies': {}
}
})
with open(manifest_path, 'wb') as manifest_obj:
manifest_obj.write(to_bytes(manifest_value))
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
actual = collection.CollectionRequirement.from_path(collection_artifact[0], True)
# While the folder name suggests a different collection, we treat MANIFEST.json as the source of truth.
assert actual.namespace == u'namespace'
assert actual.name == u'name'
assert actual.b_path == collection_artifact[0]
assert actual.api is None
assert actual.skip is True
assert actual.versions == set(['*'])
assert actual.latest_version == u'*'
assert actual.dependencies == {}
assert mock_display.call_count == 1
actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
expected_warn = "Collection at '%s' does not have a valid version set, falling back to '*'. Found version: ''" \
% to_text(collection_artifact[0])
assert expected_warn in actual_warn
def test_build_requirement_from_tar(collection_artifact):
actual = collection.CollectionRequirement.from_tar(collection_artifact[1], True, True)
assert actual.namespace == u'ansible_namespace'
assert actual.name == u'collection'
assert actual.b_path == collection_artifact[1]
assert actual.api is None
assert actual.skip is False
assert actual.versions == set([u'0.1.0'])
assert actual.latest_version == u'0.1.0'
assert actual.dependencies == {}
def test_build_requirement_from_tar_fail_not_tar(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
test_file = os.path.join(test_dir, b'fake.tar.gz')
with open(test_file, 'wb') as test_obj:
test_obj.write(b"\x00\x01\x02\x03")
expected = "Collection artifact at '%s' is not a valid tar file." % to_native(test_file)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(test_file, True, True)
def test_build_requirement_from_tar_no_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'files': [],
'format': 1,
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('FILES.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
expected = "Collection at '%s' does not contain the required file MANIFEST.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_tar_no_files(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = to_bytes(json.dumps(
{
'collection_info': {},
}
))
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
expected = "Collection at '%s' does not contain the required file FILES.json." % to_native(tar_path)
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_tar_invalid_manifest(tmp_path_factory):
test_dir = to_bytes(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Collections Input'))
json_data = b"not a json"
tar_path = os.path.join(test_dir, b'ansible-collections.tar.gz')
with tarfile.open(tar_path, 'w:gz') as tfile:
b_io = BytesIO(json_data)
tar_info = tarfile.TarInfo('MANIFEST.json')
tar_info.size = len(json_data)
tar_info.mode = 0o0644
tfile.addfile(tarinfo=tar_info, fileobj=b_io)
expected = "Collection tar file member MANIFEST.json does not contain a valid json string."
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_tar(tar_path, True, True)
def test_build_requirement_from_name(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.1.9', '2.1.10']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.1.9', u'2.1.10'])
assert actual.latest_version == u'2.1.10'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '2.0.1-beta.1', '2.0.1']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '*', True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'1.0.1', u'2.0.1'])
assert actual.latest_version == u'2.0.1'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_with_prerelease_explicit(galaxy_server, monkeypatch):
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1-beta.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.1-beta.1', True,
True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.1-beta.1'])
assert actual.latest_version == u'2.0.1-beta.1'
assert actual.dependencies == {}
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1-beta.1')
def test_build_requirement_from_name_second_server(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['1.0.1', '1.0.2', '1.0.3']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
broken_server = copy.copy(galaxy_server)
broken_server.api_server = 'https://broken.com/'
mock_404 = MagicMock()
mock_404.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
StringIO()), "custom msg")
monkeypatch.setattr(broken_server, 'get_collection_versions', mock_404)
actual = collection.CollectionRequirement.from_name('namespace.collection', [broken_server, galaxy_server],
'>1.0.1', False, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
# assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'1.0.2', u'1.0.3'])
assert actual.latest_version == u'1.0.3'
assert actual.dependencies == {}
assert mock_404.call_count == 1
assert mock_404.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
def test_build_requirement_from_name_missing(galaxy_server, monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 404, 'msg', {},
StringIO()), "")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
expected = "Failed to find collection namespace.collection:*"
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False,
True)
def test_build_requirement_from_name_401_unauthorized(galaxy_server, monkeypatch):
mock_open = MagicMock()
mock_open.side_effect = api.GalaxyError(urllib_error.HTTPError('https://galaxy.server.com', 401, 'msg', {},
StringIO()), "error")
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_open)
expected = "error (HTTP Code: 401, Message: msg)"
with pytest.raises(api.GalaxyError, match=re.escape(expected)):
collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server, galaxy_server], '*', False)
def test_build_requirement_from_name_single_version(galaxy_server, monkeypatch):
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.0', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '2.0.0', True,
True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.0'])
assert actual.latest_version == u'2.0.0'
assert actual.dependencies == {}
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.0')
def test_build_requirement_from_name_multiple_versions_one_match(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
mock_get_info = MagicMock()
mock_get_info.return_value = api.CollectionVersionMetadata('namespace', 'collection', '2.0.1', None, None,
{})
monkeypatch.setattr(galaxy_server, 'get_collection_version_metadata', mock_get_info)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '>=2.0.1,<2.0.2',
True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.1'])
assert actual.latest_version == u'2.0.1'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
assert mock_get_info.call_count == 1
assert mock_get_info.mock_calls[0][1] == ('namespace', 'collection', '2.0.1')
def test_build_requirement_from_name_multiple_version_results(galaxy_server, monkeypatch):
mock_get_versions = MagicMock()
mock_get_versions.return_value = ['2.0.0', '2.0.1', '2.0.2', '2.0.3', '2.0.4', '2.0.5']
monkeypatch.setattr(galaxy_server, 'get_collection_versions', mock_get_versions)
actual = collection.CollectionRequirement.from_name('namespace.collection', [galaxy_server], '!=2.0.2',
True, True)
assert actual.namespace == u'namespace'
assert actual.name == u'collection'
assert actual.b_path is None
assert actual.api == galaxy_server
assert actual.skip is False
assert actual.versions == set([u'2.0.0', u'2.0.1', u'2.0.3', u'2.0.4', u'2.0.5'])
assert actual.latest_version == u'2.0.5'
assert actual.dependencies == {}
assert mock_get_versions.call_count == 1
assert mock_get_versions.mock_calls[0][1] == ('namespace', 'collection')
@pytest.mark.parametrize('versions, requirement, expected_filter, expected_latest', [
[['1.0.0', '1.0.1'], '*', ['1.0.0', '1.0.1'], '1.0.1'],
[['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<1.1.0', ['1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '>1.0.0,<=1.0.5', ['1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '>=1.1.0', ['1.1.0'], '1.1.0'],
[['1.0.0', '1.0.5', '1.1.0'], '!=1.1.0', ['1.0.0', '1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '==1.0.5', ['1.0.5'], '1.0.5'],
[['1.0.0', '1.0.5', '1.1.0'], '1.0.5', ['1.0.5'], '1.0.5'],
[['1.0.0', '2.0.0', '3.0.0'], '>=2', ['2.0.0', '3.0.0'], '3.0.0'],
])
def test_add_collection_requirements(versions, requirement, expected_filter, expected_latest):
req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', versions, requirement,
False)
assert req.versions == set(expected_filter)
assert req.latest_version == expected_latest
def test_add_collection_requirement_to_unknown_installed_version(monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
skip=True)
req.add_requirement('parent.collection', '1.0.0')
assert req.latest_version == '*'
assert mock_display.call_count == 1
actual_warn = ' '.join(mock_display.mock_calls[0][1][0].split('\n'))
assert "Failed to validate the collection requirement 'namespace.name:1.0.0' for parent.collection" in actual_warn
def test_add_collection_wildcard_requirement_to_unknown_installed_version():
req = collection.CollectionRequirement('namespace', 'name', None, 'https://galaxy.com', ['*'], '*', False,
skip=True)
req.add_requirement(str(req), '*')
assert req.versions == set('*')
assert req.latest_version == '*'
def test_add_collection_requirement_with_conflict(galaxy_server):
expected = "Cannot meet requirement ==1.0.2 for dependency namespace.name from source '%s'. Available versions " \
"before last requirement added: 1.0.0, 1.0.1\n" \
"Requirements from:\n" \
"\tbase - 'namespace.name:==1.0.2'" % galaxy_server.api_server
with pytest.raises(AnsibleError, match=expected):
collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '==1.0.2',
False)
def test_add_requirement_to_existing_collection_with_conflict(galaxy_server):
req = collection.CollectionRequirement('namespace', 'name', None, galaxy_server, ['1.0.0', '1.0.1'], '*', False)
expected = "Cannot meet dependency requirement 'namespace.name:1.0.2' for collection namespace.collection2 from " \
"source '%s'. Available versions before last requirement added: 1.0.0, 1.0.1\n" \
"Requirements from:\n" \
"\tbase - 'namespace.name:*'\n" \
"\tnamespace.collection2 - 'namespace.name:1.0.2'" % galaxy_server.api_server
with pytest.raises(AnsibleError, match=re.escape(expected)):
req.add_requirement('namespace.collection2', '1.0.2')
def test_add_requirement_to_installed_collection_with_conflict():
source = 'https://galaxy.ansible.com'
req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
skip=True)
expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
"Use --force to overwrite"
with pytest.raises(AnsibleError, match=re.escape(expected)):
req.add_requirement(None, '1.0.2')
def test_add_requirement_to_installed_collection_with_conflict_as_dep():
source = 'https://galaxy.ansible.com'
req = collection.CollectionRequirement('namespace', 'name', None, source, ['1.0.0', '1.0.1'], '*', False,
skip=True)
expected = "Cannot meet requirement namespace.name:1.0.2 as it is already installed at version '1.0.1'. " \
"Use --force-with-deps to overwrite"
with pytest.raises(AnsibleError, match=re.escape(expected)):
req.add_requirement('namespace.collection2', '1.0.2')
def test_install_skipped_collection(monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
req = collection.CollectionRequirement('namespace', 'name', None, 'source', ['1.0.0'], '*', False, skip=True)
req.install(None, None)
assert mock_display.call_count == 1
assert mock_display.mock_calls[0][1][0] == "Skipping 'namespace.name' as it is already installed"
def test_install_collection(collection_artifact, monkeypatch):
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection_tar = collection_artifact[1]
output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
os.makedirs(os.path.join(collection_path, b'delete_me')) # Create a folder to verify the install cleans out the dir
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
req = collection.CollectionRequirement.from_tar(collection_tar, True, True)
req.install(to_text(output_path), temp_path)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'plugins')).st_mode) == 0o0755
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'README.md')).st_mode) == 0o0644
assert stat.S_IMODE(os.stat(os.path.join(collection_path, b'runme.sh')).st_mode) == 0o0755
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection (0.1.0) was installed successfully"
def test_install_collection_with_download(galaxy_server, collection_artifact, monkeypatch):
collection_tar = collection_artifact[1]
output_path = os.path.join(os.path.split(collection_tar)[0], b'output')
collection_path = os.path.join(output_path, b'ansible_namespace', b'collection')
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
mock_download = MagicMock()
mock_download.return_value = collection_tar
monkeypatch.setattr(collection, '_download_file', mock_download)
monkeypatch.setattr(galaxy_server, '_available_api_versions', {'v2': 'v2/'})
temp_path = os.path.join(os.path.split(collection_tar)[0], b'temp')
os.makedirs(temp_path)
meta = api.CollectionVersionMetadata('ansible_namespace', 'collection', '0.1.0', 'https://downloadme.com',
'myhash', {})
req = collection.CollectionRequirement('ansible_namespace', 'collection', None, galaxy_server,
['0.1.0'], '*', False, metadata=meta)
req.install(to_text(output_path), temp_path)
# Ensure the temp directory is empty, nothing is left behind
assert os.listdir(temp_path) == []
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
assert mock_display.call_count == 2
assert mock_display.mock_calls[0][1][0] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" \
% to_text(collection_path)
assert mock_display.mock_calls[1][1][0] == "ansible_namespace.collection (0.1.0) was installed successfully"
assert mock_download.call_count == 1
assert mock_download.mock_calls[0][1][0] == 'https://downloadme.com'
assert mock_download.mock_calls[0][1][1] == temp_path
assert mock_download.mock_calls[0][1][2] == 'myhash'
assert mock_download.mock_calls[0][1][3] is True
def test_install_collections_from_tar(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
[u'https://galaxy.ansible.com'], True, False, False, False, False)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
    assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
    assert display_msgs[3] == "ansible_namespace.collection (0.1.0) was installed successfully"
def test_install_collections_existing_without_force(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
    # Since we don't delete collection_path, the installer sees the original build skeleton as already installed, so we expect a skip
collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
[u'https://galaxy.ansible.com'], True, False, False, False, False)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'README.md', b'docs', b'galaxy.yml', b'playbooks', b'plugins', b'roles', b'runme.sh']
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 3
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Skipping 'ansible_namespace.collection' as it is already installed"
for msg in display_msgs:
assert 'WARNING' not in msg
def test_install_missing_metadata_warning(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
for file in [b'MANIFEST.json', b'galaxy.yml']:
b_path = os.path.join(collection_path, file)
if os.path.isfile(b_path):
os.unlink(b_path)
collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
[u'https://galaxy.ansible.com'], True, False, False, False, False)
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert 'WARNING' in display_msgs[0]
# Makes sure we don't get stuck in some recursive loop
@pytest.mark.parametrize('collection_artifact', [
{'ansible_namespace.collection': '>=0.0.1'},
], indirect=True)
def test_install_collection_with_circular_dependency(collection_artifact, monkeypatch):
collection_path, collection_tar = collection_artifact
temp_path = os.path.split(collection_tar)[0]
shutil.rmtree(collection_path)
mock_display = MagicMock()
monkeypatch.setattr(Display, 'display', mock_display)
collection.install_collections([(to_text(collection_tar), '*', None, None)], to_text(temp_path),
[u'https://galaxy.ansible.com'], True, False, False, False, False)
assert os.path.isdir(collection_path)
actual_files = os.listdir(collection_path)
actual_files.sort()
assert actual_files == [b'FILES.json', b'MANIFEST.json', b'README.md', b'docs', b'playbooks', b'plugins', b'roles',
b'runme.sh']
with open(os.path.join(collection_path, b'MANIFEST.json'), 'rb') as manifest_obj:
actual_manifest = json.loads(to_text(manifest_obj.read()))
assert actual_manifest['collection_info']['namespace'] == 'ansible_namespace'
assert actual_manifest['collection_info']['name'] == 'collection'
assert actual_manifest['collection_info']['version'] == '0.1.0'
# Filter out the progress cursor display calls.
display_msgs = [m[1][0] for m in mock_display.mock_calls if 'newline' not in m[2] and len(m[1]) == 1]
assert len(display_msgs) == 4
assert display_msgs[0] == "Process install dependency map"
assert display_msgs[1] == "Starting collection install process"
assert display_msgs[2] == "Installing 'ansible_namespace.collection:0.1.0' to '%s'" % to_text(collection_path)
assert display_msgs[3] == "ansible_namespace.collection (0.1.0) was installed successfully"
|
Fale/ansible
|
test/units/galaxy/test_collection_install.py
|
Python
|
gpl-3.0
| 35,033
|
[
"Galaxy"
] |
1eb02215ada37c180c54959633605c62688d7ea8b1bb9ce18ae38c97aec847d5
|
import matplotlib.pyplot as plt
import os
from astropy.table import Table
import numpy as np
# setup information sources
degas = Table.read(os.path.join(os.environ['SCRIPTDIR'],'degas_base.fits'))
stack = Table.read('/lustre/cv/users/akepley/degas/stack_test/stack_IR6p0_mom1.fits')
plotDir = os.path.join(os.environ['ANALYSISDIR'],'plots','fdense_plots')
if not os.path.exists(plotDir):
os.mkdir(plotDir)
# only look at dr1 galaxies
dr1 = degas['DR1'] == 1
ndr1 = np.sum(dr1)
# setup plot style
markers = ['o','v','^','s','*','D'] # 6 items
colors = ['royalblue','forestgreen','darkorange','royalblue','crimson','rebeccapurple','darkcyan','darkmagenta']
markerlist = np.tile(markers,int(np.ceil(ndr1/len(markers))))
markerlist = markerlist[0:ndr1]
colorlist = np.tile(colors,int(np.ceil(ndr1/len(colors))))
colorlist = colorlist[0:ndr1]
# set up plot
fig = plt.figure(figsize=(8,6),facecolor='white',edgecolor='white')
fig.subplots_adjust(left=0.1,right=0.8,bottom=0.1, top=0.9)
ax = fig.add_subplot(1,1,1)
# for each DR1 galaxy, plot the HCN-to-CO ratio (a dense gas fraction proxy) against stellar mass surface density.
for (galaxy,color,marker) in zip(degas[dr1],colorlist,markerlist):
idx = ( (stack['galaxy'] == galaxy['NAME']) \
& (stack['bin_type'] == 'stellarmass'))
mstar = stack[idx]['bin_mean']
lolims = stack[idx]['ratio_HCN_CO_lolim']
fdense = stack[idx]['ratio_HCN_CO']
fdense_err = stack[idx]['ratio_HCN_CO_err']
fdense_err[lolims] = fdense[lolims] * 0.3
ax.errorbar(mstar, fdense,
yerr = fdense_err,
uplims = lolims,
marker = marker,
markerfacecolor='none',
markeredgecolor=color,
linestyle= '--',
color=color)
ax.scatter(mstar[~lolims], fdense[~lolims],
marker=marker,
color=color,
label=galaxy['NAME'])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(loc='upper left',bbox_to_anchor=(1.0,1.0))
plt.xlabel(r'$\Sigma_{*}$ (M$_\odot$ pc$^{-2}$)')
ax.set_ylabel(r'log$_{10}$ (HCN-to-CO)')
fig.show()
fig.savefig(os.path.join(plotDir,'fdense_vs_mstar_combined.pdf'))
fig.savefig(os.path.join(plotDir,'fdense_vs_mstar_combined.png'))
plt.close()
|
low-sky/degas
|
scripts/plot_fdense_vs_mstar_combined.py
|
Python
|
gpl-3.0
| 2,305
|
[
"Galaxy"
] |
0bde8cff7f6f6b9ecb03fdcbc11f42fa6af7369dafe66444b93c2a1bf8063a91
|
# -*- coding: utf-8 -*-
"""
Interpolate sounding data onto a regular grid
"""
import gzip
from scipy import spatial
import numpy as np
from maptools import ll2utm, readShpBathy, readDEM
from kriging import kriging
from netCDF4 import Dataset
from scipy.sparse import coo_matrix
from scipy.interpolate import griddata
import time
import matplotlib.pyplot as plt
class demBuilder(object):
"Class for building a DEM on a regular cartesian grid from different inputs"""
## Properties ##
infile = 'C:/Projects/GOMGalveston/DATA/Bathymetry/NOSHyrdrographicSurveys/xyz/NOS_Galveston_Survey_Depths.csv.gz'
# Projection information
convert2utm=True
CS='NAD83'
utmzone=15
isnorth=True
vdatum = 'MSL'
# DEM Grid params
dx=25
bbox = [-95.45,-94.44,28.8,29.8]
# Interpolation options
    interptype = 'nn' # One of 'nn', 'blockavg', 'idw', 'kriging', 'griddata'
maxdist=200
NNear = 3 # Number of points to include in interpolation (only applicable to idw and kriging)
p = 1.0 # power for inverse distance weighting
# kriging options
varmodel = 'spherical'
nugget = 0.1
sill = 0.8
vrange = 250.0
def __init__(self,**kwargs):
self.__dict__.update(kwargs)
# Check if the input file is not a list
T = type(self.infile)
if T!=list:
self.multifile=False
# Read in the array
print 'Reading data from: %s...'%self.infile
            if self.infile[-3:]=='.gz':
                LL,self.Zin = read_xyz_gz(self.infile)
            elif self.infile[-3:]=='txt':
                LL,self.Zin = read_xyz(self.infile)
            elif self.infile[-3:]=='shp':
                LL,self.Zin = readShpBathy(self.infile)
            elif self.infile[-3:]=='dem':
                LL,self.Zin = readDEM(self.infile,True)
            elif self.infile[-3:]=='.nc':
                self.loadnc(fv=2)
                LL = self._returnXY()
self.Zin = np.ravel(self.Zin)
self.npt = len(self.Zin)
if self.convert2utm:
                if self.bbox is None:
                    # Work out the domain limits from the input file
self.bbox = [LL[:,0].min(),LL[:,0].max(),LL[:,1].min(),LL[:,1].max()]
else:
# Clip the points outside of the domain
print 'Clipping points outside of the bounding box...'
LL=self.clipPoints(LL)
# Convert the coordinates
print 'Transforming the coordinates to UTM...'
self.XY=ll2utm(LL,self.utmzone,self.CS,self.isnorth)
else:
self.XY=LL
else: # Multiple files
self.multifile=True
# Create the grid object
self.grd = Grid(self.bbox,self.dx,self.dx,utmzone=self.utmzone,CS=self.CS,isnorth=self.isnorth)
def build(self):
tic=time.clock()
if self.multifile==False:
if self.interptype=='nn':
print 'Building DEM with Nearest Neighbour interpolation...'
self.nearestNeighbour()
elif self.interptype=='blockavg':
print 'Building DEM with Block Averaging...'
self.blockAvg()
elif self.interptype=='idw':
print 'Building DEM with Inverse Distance Weighted Interpolation...'
self.invdistweight()
elif self.interptype=='kriging':
print 'Building DEM with Kriging Interpolation...'
self.krig()
elif self.interptype=='griddata':
print 'Building DEM using griddata...'
self.griddata()
else:
print 'Error - Unknown interpolation type: %s.'%self.interptype
else: # Multiple file interpolation
print 'Multiple input files detected - setting "interptype" to "blockavg".'
self.interptype = 'blockavg'
self.Z = np.zeros((self.grd.ny,self.grd.nx))
self.N = np.zeros((self.grd.ny,self.grd.nx))
ctr=0
for f in self.infile:
ctr+=1
# Read in the array
print 'Reading data file (%d of %d): %s...'%(ctr,len(self.infile),f)
                if f[-3:]=='.gz':
                    LL,self.Zin = read_xyz_gz(f)
                elif f[-3:]=='txt':
                    LL,self.Zin = read_xyz(f)
                elif f[-3:]=='shp':
                    LL,self.Zin = readShpBathy(f)
                elif f[-3:]=='dem':
                    LL,self.Zin = readDEM(f,True)
self.npt = len(self.Zin)
if self.convert2utm:
# Clip the points outside of the domain
#print 'Clipping points outside of the bounding box...'
#LL=self.clipPoints(LL)
# Convert the coordinates
print 'Transforming the coordinates to UTM...'
self.XY=ll2utm(LL,self.utmzone,self.CS,self.isnorth)
else:
self.XY=LL
del LL
# Interpolate
print 'Building DEM with Block Averaging...'
self.blockAvgMulti()
# Memory cleanup
del self.XY
del self.Zin
# Compute the block average for all of the files
self.Z = np.divide(self.Z,self.N)
toc=time.clock()
print 'Elapsed time %10.3f seconds.'%(toc-tic)
def _returnXY(self):
"""
Returns gridded points as a vector
"""
X,Y = np.meshgrid(self.xgrd,self.ygrd)
return np.column_stack((np.ravel(X),np.ravel(Y)))
def clipPoints(self,LL):
""" Clips points outside of the bounding box"""
X = LL[:,0]
Y = LL[:,1]
ind = np.all([X>=self.bbox[0],X<=self.bbox[1],Y>=self.bbox[2],Y<=self.bbox[3]],axis=0)
print 'Clipped %d points.'%(self.npt-sum(ind))
self.Zin = self.Zin[ind]
self.npt = len(self.Zin)
return np.concatenate((np.reshape(X[ind],(self.npt,1)),np.reshape(Y[ind],(self.npt,1))),axis=1)
def nearestNeighbour(self):
""" Nearest neighbour interpolation algorithm
Sets any points outside of maxdist to NaN
"""
MAXSIZE = 10e6
nchunks = np.ceil(self.grd.npts*self.NNear/MAXSIZE)
if nchunks == 1:
Z = nn(self.XY,self.Zin,self.grd.ravel(),maxdist=self.maxdist)
else:
pt1,pt2=tile_vector(int(self.grd.npts),int(nchunks))
Z = np.zeros((self.grd.npts,))
XYout = self.grd.ravel()
for p1,p2 in zip(pt1,pt2):
print 'Interpolating tile %d to %d of %d...'%(p1,p2,self.grd.npts)
Z[p1:p2] = nn(self.XY,self.Zin,XYout[p1:p2,:],maxdist=self.maxdist)
self.Z = np.reshape(Z,(self.grd.ny,self.grd.nx))
def griddata(self):
"""Wrapper for griddata"""
print 'Interpolating %d data points'%self.npt
self.Z = griddata((self.XY[:,0],self.XY[:,1]), self.Zin, (self.grd.X, self.grd.Y), method='linear')
def blockAvg(self):
"""Block averaging interpolation"""
# Get the grid indices
J,I = self.grd.returnij(self.XY[:,0],self.XY[:,1])
# Average onto the grid
Z = np.zeros((self.grd.ny,self.grd.nx))
N = np.zeros((self.grd.ny,self.grd.nx))
ctr=-1
for jj,ii in zip(J,I):
ctr+=1
if jj != -1 and ii != -1:
Z[jj,ii] += self.Zin[ctr]
N[jj,ii] += 1.0
self.Z = np.divide(Z,N)
self.N = N
def blockAvgMulti(self):
"""Block averaging interpolation"""
print 'Interpolating %d data points'%self.npt
# Get the grid indices
J,I = self.grd.returnij(self.XY[:,0],self.XY[:,1])
# Zero out of bound points
sumpts = np.ones((self.npt,))
ind = I==-1
self.Zin[ind]=0.0
I[ind]=0
sumpts[ind]=0
ind = J==-1
self.Zin[ind]=0.0
J[ind]=0
sumpts[ind]=0
# Use the sparse matrix library for accumulation
self.Z += coo_matrix((np.ravel(self.Zin),(J,I)),\
shape=(self.grd.ny,self.grd.nx)).todense()
self.N += coo_matrix((sumpts,(J,I)),\
shape=(self.grd.ny,self.grd.nx)).todense()
## # Average onto the grid
## ctr=-1
## for jj,ii in zip(J,I):
## ctr+=1
## if jj != -1 and ii != -1:
## self.Z[jj,ii] += self.Zin[ctr]
## self.N[jj,ii] += 1.0
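    # Illustrative sketch (not part of the original file): coo_matrix sums
    # duplicate (row, col) entries when densified, which is what makes the
    # accumulation above equivalent to the commented-out loop, e.g.
    #   coo_matrix(([1.0, 2.0, 3.0], ([0, 0, 1], [0, 0, 1])), shape=(2, 2)).todense()
    # gives [[3., 0.], [0., 3.]].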
def invdistweight(self):
""" Inverse distance weighted interpolation """
# Break it down into smaller chunks
MAXSIZE = 10e6
nchunks = np.ceil(self.grd.npts*self.NNear/MAXSIZE)
if nchunks == 1:
Z=idw(self.XY,self.Zin,self.grd.ravel(),maxdist=self.maxdist,NNear=self.NNear,p=self.p)
else:
pt1,pt2=tile_vector(int(self.grd.npts),int(nchunks))
Z = np.zeros((self.grd.npts,))
XYout = self.grd.ravel()
for p1,p2 in zip(pt1,pt2):
print 'Interpolating tile %d to %d of %d...'%(p1,p2,self.grd.npts)
Z[p1:p2]=idw(self.XY,self.Zin,XYout[p1:p2,:],maxdist=self.maxdist,NNear=self.NNear,p=self.p)
self.Z = np.reshape(Z,(self.grd.ny,self.grd.nx))
def krig(self):
""" Kriging interpolation"""
# Break it down into smaller chunks
MAXSIZE = 15e6
nchunks = np.ceil(self.grd.npts*self.NNear/MAXSIZE)
if nchunks == 1:
self.Finterp = kriging(self.XY,self.grd.ravel(),maxdist=self.maxdist,NNear=self.NNear)
Z = self.Finterp(self.Zin)
else:
pt1,pt2=tile_vector(int(self.grd.npts),int(nchunks))
Z = np.zeros((self.grd.npts,))
XYout = self.grd.ravel()
for p1,p2 in zip(pt1,pt2):
print 'Interpolating tile %d to %d of %d...'%(p1,p2,self.grd.npts)
self.Finterp = kriging(self.XY,XYout[p1:p2,:],maxdist=self.maxdist,NNear=self.NNear)
Z[p1:p2] = self.Finterp(self.Zin)
self.Z = np.reshape(Z,(self.grd.ny,self.grd.nx))
def loadnc(self,fv=1):
""" Load the DEM data from a netcdf file"""
nc = Dataset(self.infile, 'r')
try:
self.xgrd = nc.variables['X'][:]
self.ygrd = nc.variables['Y'][:]
self.Zin = nc.variables['topo'][:]
        except KeyError:
self.xgrd = nc.variables['x'][:]
            self.ygrd = nc.variables['y'][:]
self.Zin = nc.variables['z'][:]
nc.close()
self.xgrd=self.xgrd[::fv]
self.ygrd=self.ygrd[::fv]
self.Zin=self.Zin[::fv,::fv]
def save(self,outfile='DEM.nc'):
""" Saves the DEM to a netcdf file"""
# Create the global attributes
if self.isnorth:
proj = "UTM %d (%s) in northern hemisphere."%(self.utmzone,self.CS)
else:
proj = "UTM %d (%s) in southern hemisphere."%(self.utmzone,self.CS)
intparamstr = 'Interpolation Type: %s, Number of neighbours: %d, Maximum search distance: %3.1f m'%(self.interptype,self.NNear,self.maxdist)
if self.interptype=='idw':
intparamstr += ', IDW power: %2.1f'%self.p
elif self.interptype=='kriging':
intparamstr += ', Variogram model: %s, sill: %3.1f, nugget: %3.1f, range: %3.1f'%(self.varmodel,self.sill,self.nugget,self.vrange)
globalatts = {'title':'DEM model',\
'history':'Created on '+time.ctime(),\
'Input dataset':self.infile,\
'Projection':proj,\
'Interpolation Parameters':intparamstr}
nc = Dataset(outfile, 'w', format='NETCDF4')
# Write the global attributes
for gg in globalatts.keys():
nc.setncattr(gg,globalatts[gg])
# Create the dimensions
dimnamex = 'nx'
dimlength = self.grd.nx
nc.createDimension(dimnamex,dimlength)
dimnamey = 'ny'
dimlength = self.grd.ny
nc.createDimension(dimnamey,dimlength)
# Create the lat lon variables
tmpvarx=nc.createVariable('X','f8',(dimnamex,))
tmpvary=nc.createVariable('Y','f8',(dimnamey,))
tmpvarx[:] = self.grd.X[0,:]
tmpvary[:] = self.grd.Y[:,0]
# Create the attributes
tmpvarx.setncattr('long_name','Easting')
tmpvarx.setncattr('units','metres')
tmpvary.setncattr('long_name','Northing')
tmpvary.setncattr('units','metres')
# Write the topo data
tmpvarz=nc.createVariable('topo','f8',(dimnamey,dimnamex),zlib=True,least_significant_digit=1)
tmpvarz[:] = self.Z
tmpvarz.setncattr('long_name','Topographic elevation')
tmpvarz.setncattr('units','metres')
tmpvarz.setncattr('coordinates','X, Y')
tmpvarz.setncattr('positive','up')
tmpvarz.setncattr('datum',self.vdatum)
nc.close()
        print 'DEM saved to %s.'%outfile
def plot(self,**kwargs):
h= plt.figure(figsize=(9,8))
#h.imshow(np.flipud(self.Z),extent=[bbox[0],bbox[1],bbox[3],bbox[2]])
plt.imshow(np.flipud(self.Z),extent=[self.grd.x0,self.grd.x1,self.grd.y0,self.grd.y1],**kwargs)
plt.colorbar()
return h
def scatter(self,**kwargs):
fig= plt.figure(figsize=(9,8))
#h.imshow(np.flipud(self.Z),extent=[bbox[0],bbox[1],bbox[3],bbox[2]])
plt.scatter(np.ravel(self.grd.X),np.ravel(self.grd.Y),c=np.ravel(self.Z),s=10,**kwargs)
plt.colorbar()
return fig
def contourf(self,vv=range(-10,0),**kwargs):
fig= plt.figure(figsize=(9,8))
#h.imshow(np.flipud(self.Z),extent=[bbox[0],bbox[1],bbox[3],bbox[2]])
plt.contourf(self.grd.X,self.grd.Y,self.Z,vv,**kwargs)
plt.colorbar()
plt.axis('equal')
return fig
class Grid(object):
""" Cartesian grid object"""
CS='NAD83'
utmzone=15
isnorth=True
def __init__(self,bbox,dx,dy,**kwargs):
self.__dict__.update(kwargs)
# Generate the grid
xy0 = ll2utm([bbox[0],bbox[2]],self.utmzone,self.CS,self.isnorth)
xy1 = ll2utm([bbox[1],bbox[3]],self.utmzone,self.CS,self.isnorth)
self.x0 = xy0[0,0]
self.y0 = xy0[0,1]
self.x1 = xy1[0,0]
self.y1 = xy1[0,1]
self.dx=dx
self.dy=dy
xgrd = np.arange(self.x0,self.x1,dx)
ygrd = np.arange(self.y0,self.y1,dy)
self.nx = len(xgrd)
self.ny = len(ygrd)
self.npts = self.nx*self.ny
self.X,self.Y = np.meshgrid(xgrd,ygrd)
def ravel(self):
""" Returns the grid coordinates as a vector"""
return np.concatenate( (np.reshape(np.ravel(self.X),(self.npts,1)),\
np.reshape(np.ravel(self.Y),(self.npts,1))),axis=1)
def returnij(self,x,y):
"""
Returns the grid cell indices that points x,y reside inside of.
"""
I = np.ceil( (x-self.x0)/self.dx)
J =np.ceil( (y-self.y0)/self.dy)
J = np.array(J,dtype=int)
I = np.array(I,dtype=int)
# blank out bad cells
J[J<0]=-1
J[J>self.ny-1]=-1
I[I<0]=-1
I[I>self.nx-1]=-1
return J,I
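# Worked example of returnij (values illustrative): with x0 = 0 and dx = 100,
# a point at x = 250 gives I = ceil(2.5) = 3; points left of x0 or beyond the
# last row/column are flagged as -1 by the bounds checks above.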
## Other functions that don't need to be in a class ##
def read_xyz_gz(fname):
# Read the raw data into an array
f = gzip.open(fname,'r')
npts = line_count(f)-1
XY = np.zeros((npts,2))
Z = np.zeros((npts,1))
ii=-1
for line in f:
ii+=1
if ii > 0:
xyz = line.split(', ')
XY[ii-1,0] = float(xyz[0])
XY[ii-1,1] = float(xyz[1])
Z[ii-1,0] = float(xyz[2])
f.close()
return XY,Z
def read_xyz(fname):
# Read the raw data into an array
f =open(fname,'r')
npts = line_count(f)-1
XY = np.zeros((npts,2))
Z = np.zeros((npts,1))
ii=-1
for line in f:
ii+=1
if ii > 0:
xyz = line.split(', ')
XY[ii-1,0] = float(xyz[0])
XY[ii-1,1] = float(xyz[1])
Z[ii-1,0] = float(xyz[2])
f.close()
return XY,Z
def line_count(f):
for i, l in enumerate(f):
pass
# try:
# f.rewind()
# except ValueError:
# f.seek(0,0)
f.seek(0,0)
return i + 1
def tile_vector(count,chunks):
rem = np.remainder(count,chunks)
cnt2 = count-rem
dx = cnt2/chunks
if count != cnt2:
pt1 = range(0,cnt2,dx)
pt2 = range(dx,cnt2,dx) + [count]
else:
pt1 = range(0,count-dx,dx)
pt2 = range(dx,count,dx)
return pt1,pt2
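# For example, tile_vector(10, 3) returns pt1 = [0, 3, 6] and pt2 = [3, 6, 10],
# i.e. chunks [0:3], [3:6] and [6:10]; the remainder is folded into the last chunk.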
def idw(XYin,Zin,XYout,maxdist=300,NNear=3,p=1):
"""Inverse distance weighted interpolation function"""
# Compute the spatial tree
kd = spatial.cKDTree(XYin)
# Perform query on all of the points in the grid
dist,ind=kd.query(XYout,distance_upper_bound=maxdist,k=NNear)
# Calculate the weights
W = 1/dist**p
Wsum = np.sum(W,axis=1)
for ii in range(NNear):
W[:,ii] = W[:,ii]/Wsum
# create the mask
mask = (dist==np.inf)
ind[mask]=1
# Fill the array and resize it
Zraw = np.squeeze(Zin[ind])
# Compute the weighted sums and mask the blank points
return np.sum(Zraw*W,axis=1)
#Z[mask]=np.nan
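# Minimal idw() usage sketch (coordinates and values hypothetical):
# XYin = np.array([[0., 0.], [1., 0.], [0., 1.]])
# Zin = np.array([0., 1., 2.])
# Zout = idw(XYin, Zin, np.array([[0.5, 0.5]]), maxdist=10., NNear=3, p=1)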
def nn(XYin,Zin,XYout,maxdist=300):
""" Nearest neighbour interpolation algorithm
Sets any points outside of maxdist to NaN
"""
# Compute the spatial tree
kd = spatial.cKDTree(XYin)
# Perform query on all of the points in the grid
dist,ind=kd.query(XYout,distance_upper_bound=maxdist)
# create the mask
mask = (dist==np.inf)
ind[mask]=1
# Fill the array and resize it
Z = Zin[ind]
Z[mask]=np.nan
return(Z)
################
# Testing sections
#dem = demBuilder(dx=100,interptype='kriging',maxdist=500,NNear=6,vrange=200)
#dem = demBuilder(dx=50,interptype='idw',maxdist=500,NNear=3)
#dem.build()
#
#ncfile = 'C:/Projects/GOMGalveston/DATA/Bathymetry/DEMs/testDEM.nc'
#dem.save(ncfile)
#
#f=dem.contourf(vv=range(-15,1),vmin=-15,vmax=0,cmap=plt.cm.gist_earth)
#f.savefig(ncfile[:-2]+'pdf')
#dem.plot(vmin=-15,vmax=0,cmap=plt.cm.gist_earth)
#dem.scatter(vmin=-15,vmax=0,cmap=plt.cm.gist_earth)
#infile = 'E:/Projects/GOMGalveston/DATA/Bathymetry/LIDAR/USACE_2009/29094_48_43_raw.txt'
#LL,Z = read_xyz(infile)
|
UT-CWE/Hyospy
|
Hyospy_ensemble/lib/SUNTANS/GIS/demBuilder.py
|
Python
|
mit
| 19,965
|
[
"NetCDF"
] |
fa428ca03c3b5b9e600f3fa0fcd7b01fea10aa1fea0480a64febfd24a31da8c5
|
"""Mission Pinball Framework Media Controller (mpf-mc) setup.py.
Notes:
This setup script is a modified/customized version of the Kivy setup.py script.
"""
import sys
import re
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists, isdir
from os import walk, environ
from distutils.version import LooseVersion
from distutils.sysconfig import get_python_inc
from collections import OrderedDict
from time import sleep
from sysconfig import get_paths
from setuptools import setup, Extension
print('Using setuptools')
# fix error with py3's LooseVersion comparisons
def ver_equal(self, other):
return self.version == other
LooseVersion.__eq__ = ver_equal
MIN_CYTHON_STRING = '0.24'
MIN_CYTHON_VERSION = LooseVersion(MIN_CYTHON_STRING)
MAX_CYTHON_STRING = '0.29.21'
MAX_CYTHON_VERSION = LooseVersion(MAX_CYTHON_STRING)
CYTHON_UNSUPPORTED = (
# ref https://github.com/cython/cython/issues/1968
'0.27', '0.27.2'
)
CYTHON_REQUIRES_STRING = (
'cython>={min_version},<={max_version},{exclusion}'.format(
min_version=MIN_CYTHON_STRING,
max_version=MAX_CYTHON_STRING,
exclusion=','.join('!=%s' % excl for excl in CYTHON_UNSUPPORTED),
)
)
PACKAGE_FILES_ALLOWED_EXT = ('py', 'yaml', 'png', 'md', 'zip', 'gif', 'jpg',
'mp4', 'm4v', 'so', 'pyd', 'dylib', 'wav', 'ogg',
'pxd', 'pyx', 'c', 'h', 'ttf', 'fnt', 'txt')
on_rtd = os.environ.get('READTHEDOCS') == 'True'
def getoutput(cmd, env=None):
# pylint: disable-msg=import-outside-toplevel
import subprocess
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
p.wait()
if p.returncode: # if not returncode == 0
print('WARNING: A problem occurred while running {0} (code {1})\n'
.format(cmd, p.returncode))
stderr_content = p.stderr.read()
if stderr_content:
print('{0}\n'.format(stderr_content))
return ""
return p.stdout.read()
def pkgconfig(*packages, **kw):
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
lenviron = None
pconfig = join(sys.prefix, 'libs', 'pkgconfig')
if isdir(pconfig):
lenviron = environ.copy()
lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(
environ.get('PKG_CONFIG_PATH', ''), pconfig)
cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
results = getoutput(cmd, lenviron).split()
for token in results:
extension = token[:2].decode('utf-8')
flag = flag_map.get(extension)
if not flag:
continue
kw.setdefault(flag, []).append(token[2:].decode('utf-8'))
return kw
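# For example, pkgconfig('sdl2') might yield something like
# {'include_dirs': ['/usr/include/SDL2'], 'libraries': ['SDL2']}
# (paths illustrative; the exact flags depend on the local pkg-config setup).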
def get_isolated_env_paths():
try:
# sdl2_dev is installed before setup.py is run, when installing from
# source due to pyproject.toml. However, it is installed to a
# pip isolated env, which we need to add to compiler
# pylint: disable-msg=import-outside-toplevel
import kivy_deps.sdl2_dev as sdl2_dev
except ImportError:
return [], []
sdl_root = os.path.abspath(join(sdl2_dev.__path__[0], '../../../..'))
includes = [join(sdl_root, 'Include')] if isdir(join(sdl_root, 'Include')) else []
libs = [join(sdl_root, 'libs')] if isdir(join(sdl_root, 'libs')) else []
return includes, libs
# -----------------------------------------------------------------------------
# Determine on which platform we are
platform = sys.platform
# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)
if sys.platform == 'darwin':
if sys.maxsize > 2 ** 32:
osx_arch = 'x86_64'
else:
osx_arch = 'i386'
# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
platform = 'ios'
if exists('/opt/vc/include/bcm_host.h'):
platform = 'rpi'
if exists('/usr/lib/arm-linux-gnueabihf/libMali.so'):
platform = 'mali'
# -----------------------------------------------------------------------------
# Detect options
#
c_options = OrderedDict()
c_options['use_rpi'] = platform == 'rpi'
c_options['use_mali'] = platform == 'mali'
c_options['use_sdl2'] = True
c_options['use_gstreamer'] = True
c_options['use_avfoundation'] = platform == 'darwin'
c_options['use_osx_frameworks'] = platform == 'darwin'
# now check if environ is changing the default values
for key in list(c_options.keys()):
ukey = key.upper()
if ukey in environ:
value = bool(int(environ[ukey]))
print('Environ change {0} -> {1}'.format(key, value))
c_options[key] = value
# -----------------------------------------------------------------------------
# Cython check
# Cython usage is optional (.c files are included to build without Cython)
#
cython_unsupported_append = '''
Please note that the following versions of Cython are not supported
at all: {}
'''.format(', '.join(map(str, CYTHON_UNSUPPORTED)))
cython_min = '''\
This version of Cython is not compatible with MPF-MC. Please upgrade to
at least version {0}, preferably the newest supported version {1}.
If your platform provides a Cython package, make sure you have upgraded
to the newest version. If the newest version available is still too low,
please remove it and install the newest supported Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
cython_unsupported_append if CYTHON_UNSUPPORTED else '')
cython_max = '''\
This version of Cython is untested with MPF-MC. While this version may
work perfectly fine, it is possible that you may experience issues. If
you do have issues, please downgrade to a supported version. It is
best to use the newest supported version, {1}, but the minimum
supported version is {0}.
If your platform provides a Cython package, check if you can downgrade
to a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
cython_unsupported_append if CYTHON_UNSUPPORTED else '')
cython_unsupported = '''\
This version of Cython suffers from known bugs and is unsupported.
Please install the newest supported version, {1}, if possible, but
the minimum supported version is {0}.
If your platform provides a Cython package, check if you can install
a supported version. Otherwise, uninstall the platform package and
install Cython via pip:
pip install -I Cython=={1}{2}\
'''.format(MIN_CYTHON_STRING, MAX_CYTHON_STRING,
cython_unsupported_append)
have_cython = False
skip_cython = environ.get('USE_CYTHON', False) not in ['1', 'True', 'TRUE', 'true', 'Yes', 'YES', 'y', 'Y']
if skip_cython:
print("\nSkipping Cython build (using .c files)")
else:
try:
# check for cython
from Cython.Distutils import build_ext
have_cython = True
import Cython
cy_version_str = Cython.__version__
cy_ver = LooseVersion(cy_version_str)
print('\nDetected Cython version {}'.format(cy_version_str))
if cy_ver < MIN_CYTHON_VERSION:
print(cython_min)
raise ImportError('Incompatible Cython Version')
if cy_ver in CYTHON_UNSUPPORTED:
print(cython_unsupported)
raise ImportError('Incompatible Cython Version')
if cy_ver > MAX_CYTHON_VERSION:
print(cython_max)
sleep(1)
except ImportError:
print("\nCython is missing and the USE_CYTHON environment variable is set to True!\n\n")
raise
if not have_cython:
from distutils.command.build_ext import build_ext # noqa
# -----------------------------------------------------------------------------
# Setup classes
# the build path where kivy is being compiled
src_path = build_path = dirname(__file__)
class CustomBuildExt(build_ext):
def finalize_options(self):
# pylint: disable-msg=assignment-from-no-return
retval = build_ext.finalize_options(self)
global build_path # noqa
if (self.build_lib is not None and exists(self.build_lib) and
not self.inplace):
build_path = self.build_lib
return retval
def build_extensions(self):
c = self.compiler.compiler_type
print('Detected compiler is {}'.format(c))
if c != 'msvc':
for e in self.extensions:
e.extra_link_args += ['-lm']
build_ext.build_extensions(self)
def _check_and_fix_sdl2_mixer(f_path_to_check):
# Between SDL_mixer 2.0.1 and 2.0.4, the included frameworks changed
    # smpeg2 has been replaced with mpg123, which needs no fixing.
smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework"
"/Versions/A/smpeg2").format(f_path_to_check)
if not exists(smpeg2_path):
return
print("Check if SDL2_mixer smpeg2 have an @executable_path")
rpath_from = ("@executable_path/../Frameworks/SDL2.framework"
"/Versions/A/SDL2")
rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2"
output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8')
if "@executable_path" not in output:
return
print("WARNING: Your SDL2_mixer version is invalid")
print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a")
print("WARNING: reference to @executable_path that will fail the")
print("WARNING: execution of your application.")
print("WARNING: We are going to change:")
print("WARNING: from: {}".format(rpath_from))
print("WARNING: to: {}".format(rpath_to))
getoutput("install_name_tool -change {} {} {}".format(
rpath_from, rpath_to, smpeg2_path))
output = getoutput(("otool -L '{}'").format(smpeg2_path))
if b"@executable_path" not in output:
print("WARNING: Change successfully applied!")
print("WARNING: You'll never see this message again.")
else:
print("WARNING: Unable to apply the changes, sorry.")
gst_flags = {}
if platform == 'darwin':
if c_options['use_osx_frameworks']:
if osx_arch == "i386":
print("Warning: building with frameworks fail on i386")
else:
print("OSX framework used, force to x86_64 only")
environ["ARCHFLAGS"] = environ.get("ARCHFLAGS", "-arch x86_64")
print("OSX ARCHFLAGS are: {}".format(environ["ARCHFLAGS"]))
# detect gstreamer, only on desktop
# works if we forced the options or in autodetection
if c_options['use_gstreamer'] in (None, True):
gstreamer_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
f_path = '/Library/Frameworks/GStreamer.framework'
if not exists(f_path):
c_options['use_gstreamer'] = False
            print('GStreamer framework not found, falling back on pkg-config')
else:
print('GStreamer framework found')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190',
'-framework', 'GStreamer'],
'include_dirs': [join(f_path, 'Headers')]}
elif platform == 'win32':
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
gstreamer_valid = True
c_options['use_gstreamer'] = True
else:
_includes = get_isolated_env_paths()[0] + [get_paths()['include']]
for include_dir in _includes:
if exists(join(include_dir, 'gst', 'gst.h')):
print('GStreamer found via gst.h')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'libraries':
['gstreamer-1.0', 'glib-2.0', 'gobject-2.0']}
break
if not gstreamer_valid:
# use pkg-config approach instead
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
gstreamer_valid = True
c_options['use_gstreamer'] = True
if not gstreamer_valid:
raise RuntimeError('GStreamer not found and is required to build MPF-MC')
# detect SDL2
# works if we forced the options or in autodetection
sdl2_flags = {}
if c_options['use_sdl2'] in (None, True):
sdl2_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
sdl2_valid = True
sdl2_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190'],
'include_dirs': [],
'extra_compile_args': ['-F/Library/Frameworks']
}
for name in ('SDL2', 'SDL2_image', 'SDL2_mixer'):
f_path = '/Library/Frameworks/{}.framework'.format(name)
if not exists(f_path):
print('Missing framework {}'.format(f_path))
sdl2_valid = False
continue
sdl2_flags['extra_link_args'] += ['-framework', name]
sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]
print('Found sdl2 frameworks: {}'.format(f_path))
if name == 'SDL2_mixer':
_check_and_fix_sdl2_mixer(f_path)
if not sdl2_valid:
c_options['use_sdl2'] = False
            print('SDL2 frameworks not found, falling back on pkg-config')
else:
c_options['use_sdl2'] = True
print('Activate SDL2 compilation')
if not sdl2_valid and platform != "ios":
# use pkg-config approach instead
sdl2_flags = pkgconfig('sdl2', 'SDL2_image', 'SDL2_mixer')
if 'libraries' in sdl2_flags:
print('SDL2 found via pkg-config')
c_options['use_sdl2'] = True
# -----------------------------------------------------------------------------
# declare flags
def get_modulename_from_file(filename_to_check):
filename_to_check = filename_to_check.replace(sep, '/')
pyx = '.'.join(filename_to_check.split('.')[:-1])
pyxl = pyx.split('/')
while pyxl[0] != 'mpfmc':
pyxl.pop(0)
if pyxl[1] == 'mpfmc':
pyxl.pop(0)
return '.'.join(pyxl)
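# e.g. 'build/mpfmc/core/audio/track.pyx' -> 'mpfmc.core.audio.track'
# (path illustrative)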
def expand(root_path, *args):
return join(root_path, 'mpfmc', *args)
class CythonExtension(Extension):
def __init__(self, *args, **kwargs):
Extension.__init__(self, *args, **kwargs)
self.cython_directives = {
'c_string_encoding': 'utf-8',
'profile': 'USE_PROFILE' in environ,
            'embedsignature': environ.get('USE_EMBEDSIGNATURE', '0') == '1',
'language_level': 3,
'unraisable_tracebacks': True}
# XXX with pip, setuptools is imported before distutils, and change
# our pyx to c, then, cythonize doesn't happen. So force again our
# sources
self.sources = args[1]
def merge(d1, *args):
d1 = deepcopy(d1)
for d2 in args:
for item_key, item_value in d2.items():
item_value = deepcopy(item_value)
if item_key in d1:
d1[item_key].extend(item_value)
else:
d1[item_key] = item_value
return d1
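# Sketch of merge() behaviour (values illustrative):
# merge({'libraries': ['m']}, {'libraries': ['z'], 'include_dirs': ['/x']})
# -> {'libraries': ['m', 'z'], 'include_dirs': ['/x']}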
def determine_base_flags():
flags = {
'libraries': [],
'include_dirs': [join(src_path, 'kivy', 'include')],
'library_dirs': [],
'extra_link_args': [],
'extra_compile_args': []}
if platform.startswith('freebsd'):
flags['include_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'include')]
flags['library_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'lib')]
elif platform == 'darwin':
v = os.uname()
if v[2] >= '13.0.0':
# use xcode-select to search on the right Xcode path
# XXX use the best SDK available instead of a specific one
# pylint: disable-msg=import-outside-toplevel
import platform as _platform
xcode_dev = getoutput('xcode-select -p').splitlines()[0]
sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])
print('Xcode detected at {}, and using OS X{} sdk'.format(
xcode_dev, sdk_mac_ver))
sysroot = join(
xcode_dev.decode('utf-8'),
'Platforms/MacOSX.platform/Developer/SDKs',
'MacOSX{}.sdk'.format(sdk_mac_ver),
'System/Library/Frameworks')
else:
sysroot = ('/System/Library/Frameworks/'
'ApplicationServices.framework/Frameworks')
flags['extra_compile_args'] += ['-F%s' % sysroot]
flags['extra_link_args'] += ['-F%s' % sysroot]
elif platform == 'win32':
flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]
flags['library_dirs'] += [join(sys.prefix, "libs")]
return flags
def determine_sdl2():
flags = {}
if not c_options['use_sdl2']:
return flags
sdl2_path = environ.get('KIVY_SDL2_PATH', None)
if sdl2_flags and not sdl2_path and platform == 'darwin':
return sdl2_flags
includes, _ = get_isolated_env_paths()
# no pkgconfig info, or we want to use a specific sdl2 path, so perform
# manual configuration
flags['libraries'] = ['SDL2', 'SDL2_image', 'SDL2_mixer']
split_chr = ';' if platform == 'win32' else ':'
sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []
if not sdl2_paths:
sdl2_paths = []
for include in includes + [join(sys.prefix, 'include')]:
sdl_inc = join(include, 'SDL2')
if isdir(sdl_inc):
sdl2_paths.append(sdl_inc)
sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])
flags['include_dirs'] = sdl2_paths
flags['extra_link_args'] = []
flags['extra_compile_args'] = []
flags['library_dirs'] = (
sdl2_paths if sdl2_paths else
['/usr/local/lib/'])
if sdl2_flags:
flags = merge(flags, sdl2_flags)
# ensure headers for all the SDL2 and sub libraries are available
libs_to_check = ['SDL', 'SDL_mixer', 'SDL_image']
can_compile = True
for lib in libs_to_check:
found = False
for d in flags['include_dirs']:
inc_dir = join(d, '{}.h'.format(lib))
if exists(inc_dir):
found = True
print('SDL2: found {} header at {}'.format(lib, inc_dir))
break
if not found:
print('SDL2: missing sub library {}'.format(lib))
can_compile = False
if not can_compile:
c_options['use_sdl2'] = False
return {}
return flags
base_flags = determine_base_flags()
# -----------------------------------------------------------------------------
# sources to compile
sources = {
'core/audio/sound_file.pyx': {
'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
'core/audio/track.pyx': {
'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
'core/audio/track_standard.pyx': {
'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
'core/audio/track_sound_loop.pyx': {
'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
'core/audio/audio_interface.pyx': {
'depends': ['core/audio/sdl2_helper.h', 'core/audio/gstreamer_helper.h']},
'core/audio/playlist_controller.pyx': {},
'uix/bitmap_font/bitmap_font.pyx': {'depends': ['core/audio/sdl2.pxi', ]}
}
if c_options["use_sdl2"] and not on_rtd:
sdl2_flags = determine_sdl2()
else:
sdl2_flags = {}
if sdl2_flags:
    for source_file, flags in sources.items():
        sources[source_file] = merge(
            base_flags, gst_flags, sdl2_flags, flags)
# -----------------------------------------------------------------------------
# extension modules
def get_extensions_from_sources(sources_to_search):
ext_modules_found = []
for pyx, flags in sources_to_search.items():
pyx = expand(src_path, pyx)
depends_sources = [expand(src_path, x) for x in flags.pop('depends', [])]
c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]
if not have_cython:
pyx = '%s.c' % pyx[:-4]
f_depends = [x for x in depends_sources if x.rsplit('.', 1)[-1] in (
'c', 'cpp', 'm')]
module_name = get_modulename_from_file(pyx)
flags_clean = {'depends': depends_sources}
for item_key, item_value in flags.items():
if item_value:
flags_clean[item_key] = item_value
ext_modules_found.append(CythonExtension(
module_name, [pyx] + f_depends + c_depends, **flags_clean))
return ext_modules_found
print(sources)
if not on_rtd:
ext_modules = get_extensions_from_sources(sources)
else:
ext_modules = []
# -----------------------------------------------------------------------------
# Get the version number of mpf-mc and the required version of MPF by reading
# the file directly. We can't import it because that would import mpf and
# break the setup. Details here:
# http://stackoverflow.com/questions/458550/standard-way-to-embed-version
# -into-python-package
version_file = "mpfmc/_version.py"
with open(version_file, "rt") as f:
    version_file_content = f.read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(version_re, version_file_content, re.M)
if mo:
mc_version = mo.group(1)
else:
raise RuntimeError(
"Unable to find version string in %s." % (version_file,))
# This section pulls the MPF required version from the mpf-mc version file so
# we can write that as a requirement below
mpf_version_re = r"^__mpf_version_required__ = ['\"]([^'\"]*)['\"]"
mo = re.search(mpf_version_re, version_file_content, re.M)
if mo:
mpf_version = mo.group(1)
else:
raise RuntimeError("Unable to find MPF version string in %s." % (
version_file,))
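# For reference, mpfmc/_version.py is expected to contain lines of the form
# (version numbers illustrative):
#     __version__ = '0.50.0'
#     __mpf_version_required__ = '0.50.0'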
install_requires = ['ruamel.yaml==0.15.100', # better YAML library
'mpf>={}'.format(mpf_version),
'kivy==2.0.0',
'psutil==5.7.3',
'Pygments==2.6.1', # YAML syntax formatting for the iMC
# also update those in appveyor.yaml if you change versions
'kivy_deps.sdl2==0.3.1;platform_system=="Windows"',
'kivy_deps.sdl2-dev==0.3.1;platform_system=="Windows"',
'kivy_deps.glew==0.3.0;platform_system=="Windows"',
'kivy_deps.glew-dev==0.3.0;platform_system=="Windows"',
'kivy_deps.gstreamer==0.3.1;platform_system=="Windows"',
'kivy_deps.gstreamer-dev==0.3.1;platform_system=="Windows"',
'ffpyplayer==4.3.2'
]
# If we're running on Read The Docs, then we just need to copy the files
# (since mpf-docs uses the test YAML files in the doc build), and we don't
# need to actually install mpf-mc, so override the installation requirements:
if on_rtd:
install_requires = []
# -----------------------------------------------------------------------------
# automatically detect package files
package_files = dict(mpfmc=list())
for root, _, files in walk('mpfmc'):
for fn in files:
ext = fn.split('.')[-1].lower()
if ext not in PACKAGE_FILES_ALLOWED_EXT:
continue
        filename = join(root, fn)
        package_files['mpfmc'].append('/'.join(filename.split(os.sep)[1:]))
# -----------------------------------------------------------------------------
# setup !
setup(
name='mpf-mc',
version=mc_version,
author='The Mission Pinball Framework Team',
author_email='brian@missionpinball.org',
url='http://missionpinball.org',
license='MIT',
description='Mission Pinball Framework Media Controller',
long_description='''Graphics, video, and audio engine for the
Mission Pinball Framework.
The Mission Pinball Framework Media Controller (MPF-MC) is a component
of the Mission Pinball Framework (MPF) that controls graphics and
sound, including dot matrix displays (DMDs), LCD displays, and color
RGB LED displays.
(The MPF media controller architecture is modular, so you can use this
MPF-MC package or another one.)
The MPF-MC is built on Kivy and leverages SDL2, OpenGL, and
GPU-accelerated hardware.
MPF is a work-in-progress that is not yet complete, though we're
actively developing it and checking in several commits a week. It's
MIT licensed, actively developed by fun people, and supported by a
vibrant pinball-loving community.''',
keywords='pinball',
ext_modules=ext_modules,
cmdclass={'build_ext': CustomBuildExt},
packages=['mpfmc'],
package_dir={'mpfmc': 'mpfmc'},
package_data=package_files,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment :: Arcade'
],
install_requires=install_requires,
tests_require=[],
entry_points='''
[mpf.config_player]
sound_player=mpfmc.config_players.plugins.sound_player:register_with_mpf
sound_loop_player=mpfmc.config_players.plugins.sound_loop_player:register_with_mpf
playlist_player=mpfmc.config_players.plugins.playlist_player:register_with_mpf
widget_player=mpfmc.config_players.plugins.widget_player:register_with_mpf
slide_player=mpfmc.config_players.plugins.slide_player:register_with_mpf
track_player=mpfmc.config_players.plugins.track_player:register_with_mpf
display_light_player=mpfmc.config_players.plugins.display_light_player:register_with_mpf
[mpf.command]
mc=mpfmc.commands.mc:get_command
imc=mpfmc.commands.imc:get_command
''',
setup_requires=[CYTHON_REQUIRES_STRING] if not skip_cython else [])
|
missionpinball/mpf-mc
|
setup.py
|
Python
|
mit
| 27,376
|
[
"Brian"
] |
c3987cc750ae6fd5b2ea59d02ce596b60386123756e3c60ae31ba4e297dd5d96
|
# APRP.py: module containing functions, etc. related to the Approximate Partial Radiation Perturbation method
# (Taylor et al., 2007). Based on Matlab code written by Yen-Ting Hwang.
#
# To run, import this module into an outside script and then run "aprp_main" for
# the desired model
import numpy as np
import netCDF4 as nc4
#Main function to run, for an individual model (which model is specified via the "dataPaths" arguments).
#This version assumes variable names and dimensions following CMIP convention.
#
#Inputs:
# dataPaths1: dictionary of paths to the netCDF output for time period 1
# firstMonth1: first month (indexed from beginning of output) for time period 1--note Python indices start with 0
# lastMonth1: last month (indexed from beginning of output) for time period 1
# dataPaths2: dictionary of paths to the netCDF output for time period 2
# (if the two states being compared are different times from the same run, make this the same as dataPaths1)
# firstMonth2: first month (indexed from beginning of output) for time period 2
# lastMonth2: last month (indexed from beginning of output) for time period 2
#
#Outputs:
# A dictionary of dictionaries:
# returnDict['APRP']: contains results from comparing the two time periods (see "d_albedo" function for list of variables)
# returnDict['Time1_preliminaries']: contains the relevant model output variables, having been read in from NetCDF
# files, and processed including cloudy-sky calculations and multiannual means
# returnDict['Time2_preliminaries']: same as above but for time period 2 (see "loadNetCDF" function for list of variables)
# returnDict['Time1_parameters']: contains tuning parameters for the idealized single-layer radiative transfer model
# returnDict['Time2_parameters']: same as above but for time period 2 (see "parameters" function for list of variables)
# Syntax for accessing: e.g. to get the radiative effect of surface albedo changes, type returnDict['APRP']['surface']
#
def aprp_main(dataPaths1, firstMonth1, lastMonth1, dataPaths2, firstMonth2, lastMonth2):
#Load files and run calculations for first time period
dict1A = loadNetCDF(dataPaths1, firstMonth1, lastMonth1)
dict1B = parameters(dict1A)
#Load files and run calculations for second time period
dict2A = loadNetCDF(dataPaths2, firstMonth2, lastMonth2)
dict2B = parameters(dict2A)
    #Run calculations regarding change between 2 time periods
dictC = d_albedo(dict1A, dict1B, dict2A, dict2B)
#Nest the dictionaries into an outside dictionary to return
returnDict = dict()
returnDict['APRP'] = dictC
returnDict['Time1_preliminaries'] = dict1A
returnDict['Time1_parameters'] = dict1B
returnDict['Time2_preliminaries'] = dict2A
returnDict['Time2_parameters'] = dict2B
return returnDict
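#Example call (paths hypothetical; month indices are 0-based):
#cmip_vars = ['rsds', 'rsus', 'rsut', 'rsdt', 'rsutcs', 'rsdscs', 'rsuscs', 'clt']
#paths1 = {v: '/data/{}_piControl.nc'.format(v) for v in cmip_vars}
#paths2 = {v: '/data/{}_abrupt4xCO2.nc'.format(v) for v in cmip_vars}
#result = aprp_main(paths1, 0, 239, paths2, 0, 239)
#result['APRP']['surface'] #radiative effect of surface albedo changes (W/m^2)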
#Load variables from netCDF files (run twice, once for each time) and calculate overcast sky data.
#Based on "load_nc_coupled.m" in Ting's code.
#Inputs: see aprp_main
#Outputs: a dictionary containing monthly mean SW fluxes at surface and TOA under all-sky, clear-sky and
# overcast conditions, as well as the model's latitude and longitude grids. These are monthly mean
# data; unlike in Ting's code, not doing multi-annual mean yet. Better to do APRP calculations on
# the individual months.
def loadNetCDF(dataPaths, firstMonth, lastMonth):
#Variable names from CMIP convention (dictionary of data paths should have labels corresponding to these)
#variables = ['rsds', 'rsus', 'rsut', 'rsdt', 'rsutcs', 'rsdscs', 'rsuscs', 'clt']
#For each of the variables, import the netCDF file and extract array from the netCDF Dataset object, subsetted
#...by the times specified in the arguments (Ting used a loop with "eval" but I'd rather avoid that
#...for readability) and mask values greater than 10^10
Dataset = nc4.Dataset(dataPaths['rsds'])
rsds = Dataset.variables['rsds'][firstMonth:lastMonth+1, :,:]
rsds = np.ma.masked_greater(rsds,1.e10)
Dataset = nc4.Dataset(dataPaths['rsus'])
rsus = Dataset.variables['rsus'][firstMonth:lastMonth+1, :,:]
rsus = np.ma.masked_greater(rsus,1.e10)
Dataset = nc4.Dataset(dataPaths['rsut'])
rsut = Dataset.variables['rsut'][firstMonth:lastMonth+1, :,:]
rsut = np.ma.masked_greater(rsut,1.e10)
Dataset = nc4.Dataset(dataPaths['rsdt'])
rsdt = Dataset.variables['rsdt'][firstMonth:lastMonth+1, :,:]
rsdt = np.ma.masked_greater(rsdt,1.e10)
Dataset = nc4.Dataset(dataPaths['rsutcs'])
rsutcs = Dataset.variables['rsutcs'][firstMonth:lastMonth+1, :,:]
rsutcs = np.ma.masked_greater(rsutcs,1.e10)
Dataset = nc4.Dataset(dataPaths['rsdscs'])
rsdscs = Dataset.variables['rsdscs'][firstMonth:lastMonth+1, :,:]
rsdscs = np.ma.masked_greater(rsdscs,1.e10)
Dataset = nc4.Dataset(dataPaths['rsuscs'])
rsuscs = Dataset.variables['rsuscs'][firstMonth:lastMonth+1, :,:]
rsuscs = np.ma.masked_greater(rsuscs,1.e10)
Dataset = nc4.Dataset(dataPaths['clt'])
clt = Dataset.variables['clt'][firstMonth:lastMonth+1, :,:]
clt = np.ma.masked_greater(clt,1.e10)
#Alternative to the repetitive code above: shorter but harder to read/debug
#for variable in variables:
# Dataset = nc4.Dataset(dataPaths[variable])
# eval(variable+'= Dataset.variables['+variable+'][firstMonth:lastMonth+1, :,:]')
# eval(variable+'= np.ma.masked_greater('+variable+', 1.e10)')
#Obtain the latitude and longitude for the model (using last Dataset in the loop which should still be available)
lat = Dataset.variables['lat'][:]
lon = Dataset.variables['lon'][:]
#Here Ting calculated multi-year means for individual months. I need to do this too.
#Dimensions are time, lat, lon
#Need to average over every 12th time element, leave the lat and lon dependence.
#Will end up with a 3D array whose dimensions are month (1-12), lat, lon.
#What is best way to do this?
#Ting looped over the 12 months.
#She also saved separate 1-month means, but never used them so I'll skip that for now.
numMonths = lastMonth - firstMonth + 1
m_rsds = np.zeros([12,len(lat),len(lon)])
m_rsus = np.zeros([12,len(lat),len(lon)])
m_rsut = np.zeros([12,len(lat),len(lon)])
m_rsdt = np.zeros([12,len(lat),len(lon)])
m_rsutcs = np.zeros([12,len(lat),len(lon)])
m_rsdscs = np.zeros([12,len(lat),len(lon)])
m_rsuscs = np.zeros([12,len(lat),len(lon)])
m_clt = np.zeros([12,len(lat),len(lon)])
for i in range(0,12):
m_rsds[i,:,:] = np.mean(rsds[i:numMonths:12,:,:], axis=0)
m_rsus[i,:,:] = np.mean(rsus[i:numMonths:12,:,:], axis=0)
m_rsut[i,:,:] = np.mean(rsut[i:numMonths:12,:,:], axis=0)
m_rsdt[i,:,:] = np.mean(rsdt[i:numMonths:12,:,:], axis=0)
m_rsutcs[i,:,:] = np.mean(rsutcs[i:numMonths:12,:,:], axis=0)
m_rsdscs[i,:,:] = np.mean(rsdscs[i:numMonths:12,:,:], axis=0)
m_rsuscs[i,:,:] = np.mean(rsuscs[i:numMonths:12,:,:], axis=0)
m_clt[i,:,:] = np.mean(clt[i:numMonths:12,:,:], axis=0)
#Calculate the overcast versions of rsds, rsus, rsut from the clear-sky and all-sky data
#First mask zero values of cloud fraction so you don't calculate overcast values in clear-sky pixels
m_clt = np.ma.masked_values(m_clt, 0)
c = m_clt/100. #c is cloud fraction. clt was in percentages
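    #Taylor et al. (2007) Eq. 3 gives all-sky = (1-c)*clear-sky + c*overcast,
    #so the overcast flux follows as (all-sky - (1-c)*clear-sky)/c: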
m_rsdsoc = (m_rsds-(1.-c)*(m_rsdscs))/c #Can derive this algebraically from Taylor et al., 2007, Eq. 3
m_rsusoc = (m_rsus-(1.-c)*(m_rsuscs))/c
m_rsutoc = (m_rsut-(1.-c)*(m_rsutcs))/c
#Mask zero values of the downward SW radiation (I assume this means polar night, for monthly mean)
m_rsds = np.ma.masked_values(m_rsds, 0)
m_rsdscs = np.ma.masked_values(m_rsdscs, 0)
m_rsdsoc = np.ma.masked_values(m_rsdsoc, 0)
m_rsdt = np.ma.masked_values(m_rsdt, 0)
#Return dictionary with all the variables calculated here (called "dictA" because calculated in first function called)
dictA = dict()
dictA['rsds'] = m_rsds
dictA['rsus'] = m_rsus
dictA['rsut'] = m_rsut
dictA['rsdt'] = m_rsdt
dictA['rsutcs'] = m_rsutcs
dictA['rsdscs'] = m_rsdscs
dictA['rsuscs'] = m_rsuscs
dictA['clt'] = m_clt
dictA['lat'] = lat
dictA['lon'] = lon
dictA['rsdsoc'] = m_rsdsoc
dictA['rsusoc'] = m_rsusoc
dictA['rsutoc'] = m_rsutoc
dictA['c'] = c #Cloud fraction as fraction, not %
return dictA
#Calculate the tuning parameters for the idealized single-layer radiative transfer model
#for the individual time period (i.e. control or warmed)
#See Figure 1 of Taylor et al., 2007, and other parts of that paper. Equations referenced are from there.
#
#Based on Ting's "parameters.m".
#
#Inputs: the dictionary output by loadNetCDF
#Outputs: a dictionary of additional outputs
def parameters(dictA):
#Clear-sky parameters
a_clr = dictA['rsuscs']/dictA['rsdscs'] #Surface albedo
Q = dictA['rsdscs']/dictA['rsdt'] #Ratio of incident surface flux to insolation
    mu_clr = dictA['rsutcs']/dictA['rsdt']+Q*(1.-a_clr) #Atmospheric transmittance (Eq. 9); masked or zero rsdt can trigger "invalid value in divide" warnings
ga_clr = (mu_clr-Q)/(mu_clr-a_clr*Q) #Atmospheric scattering coefficient (Eq. 10)
#Overcast parameters
a_oc = dictA['rsusoc']/dictA['rsdsoc'] #Surface albedo
Q = dictA['rsdsoc']/dictA['rsdt'] #Ratio of incident surface flux to insolation
mu_oc = dictA['rsutoc']/dictA['rsdt']+Q*(1.-a_oc) #Atmospheric transmittance (Eq. 9)
ga_oc = (mu_oc-Q)/(mu_oc-a_oc*Q) #Atmospheric scattering coefficient (Eq. 10)
#Calculating cloudy parameters based on clear-sky and overcast ones
#Difference between _cld and _oc: _cld is due to the cloud itself, as opposed to
#scattering and absorption from all constituents including clouds in overcast skies.
mu_cld = mu_oc / mu_clr #Eq. 14
ga_cld = (ga_oc-1.)/(1.-ga_clr)+1. #Eq. 13
#Save the relevant variables to a dictionary for later use
dictB = dict()
dictB['a_clr'] = a_clr
dictB['a_oc'] = a_oc
dictB['mu_clr'] = mu_clr
dictB['mu_cld'] = mu_cld
dictB['ga_clr'] = ga_clr
dictB['ga_cld'] = ga_cld
#Ting saved a cloud fraction variable here--I did this in earlier function instead.
return dictB
#Calculations for the differences between time periods
def d_albedo(dict1A, dict1B, dict2A, dict2B):
#First, Ting set cloud values that were masked in one time period
#equal to the value in the other time period, assuming no cloud changes.
#I'll take these variables out of the dictionary before modifying them.
a_oc1 = dict1B['a_oc']
a_oc2 = dict2B['a_oc']
a_oc2[a_oc2.mask == True] = a_oc1[a_oc2.mask == True]
a_oc1[a_oc1.mask == True] = a_oc2[a_oc1.mask == True]
mu_cld1 = dict1B['mu_cld']
mu_cld2 = dict2B['mu_cld']
mu_cld2[mu_cld2.mask == True] = mu_cld1[mu_cld2.mask == True]
mu_cld1[mu_cld1.mask == True] = mu_cld2[mu_cld1.mask == True]
ga_cld1 = dict1B['ga_cld']
ga_cld2 = dict2B['ga_cld']
ga_cld2[ga_cld2.mask == True] = ga_cld1[ga_cld2.mask == True]
ga_cld1[ga_cld1.mask == True] = ga_cld2[ga_cld1.mask == True]
#Now a bunch of calls to the "albedo" function to see how the albedo changes as a result of
#...the changes to each of the radiative components.
#Retrieve other variables from dictionaries to make calls to albedo shorter/more readable
c1 = dict1A['c']
c2 = dict2A['c']
a_clr1 = dict1B['a_clr']
a_clr2 = dict2B['a_clr']
mu_clr1 = dict1B['mu_clr']
mu_clr2 = dict2B['mu_clr']
ga_clr1 = dict1B['ga_clr']
ga_clr2 = dict2B['ga_clr']
#Base state albedo
A1 = albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld1)
A2 = albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld2)
#Change in albedo due to each component (Taylor et al., 2007, Eq. 12b)
dA_c = .5*(albedo(c2, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c1, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld2))
dA_a_clr = .5*(albedo(c1, a_clr2, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr1, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld2))
dA_a_oc = .5*(albedo(c1, a_clr1, a_oc2, mu_clr1, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc1, mu_clr2, mu_cld2, ga_clr2, ga_cld2))
dA_mu_clr = .5*(albedo(c1, a_clr1, a_oc1, mu_clr2, mu_cld1, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr1, mu_cld2, ga_clr2, ga_cld2))
dA_mu_cld = .5*(albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld2, ga_clr1, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld1, ga_clr2, ga_cld2))
dA_ga_clr = .5*(albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr2, ga_cld1)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr1, ga_cld2))
dA_ga_cld = .5*(albedo(c1, a_clr1, a_oc1, mu_clr1, mu_cld1, ga_clr1, ga_cld2)-A1)+.5*(
A2-albedo(c2, a_clr2, a_oc2, mu_clr2, mu_cld2, ga_clr2, ga_cld1))
#Set changes due to overcast or cloudy sky parameters, or changes to clouds themselves, to zero
#...if cloud fraction is less than 3% in either time period
dA_a_oc[dict1A['c'] < .03] = 0
dA_a_oc[dict2A['c'] < .03] = 0
dA_mu_cld[dict1A['c'] < .03] = 0
dA_mu_cld[dict2A['c'] < .03] = 0
dA_ga_cld[dict1A['c'] < .03] = 0
dA_ga_cld[dict2A['c'] < .03] = 0
dA_c[dict1A['c'] < .03] = 0
dA_c[dict2A['c'] < .03] = 0
#Combine different components into changes due to surface albedo, atmospheric clear-sky and atmospheric cloudy-sky
dA_a = dA_a_clr + dA_a_oc #Eq. 16a
dA_cld = dA_mu_cld + dA_ga_cld + dA_c #Eq. 16b
dA_clr = dA_mu_clr + dA_ga_clr #Eq. 16c
    #Set all planetary albedo changes to zero when incoming solar radiation is zero
#(This will replace NaNs with zeros in the polar night--affects annual means)
dA_a[dict2A['rsdt']<0.1] = 0
dA_clr[dict2A['rsdt']<0.1] = 0
dA_cld[dict2A['rsdt']<0.1] = 0
dA_a_clr[dict2A['rsdt']<0.1] = 0
dA_a_oc[dict2A['rsdt']<0.1] = 0
dA_mu_cld[dict2A['rsdt']<0.1] = 0
dA_ga_cld[dict2A['rsdt']<0.1] = 0
dA_c[dict2A['rsdt']<0.1] = 0
dA_mu_clr[dict2A['rsdt']<0.1] = 0
dA_ga_clr[dict2A['rsdt']<0.1] = 0
    #Calculate radiative effects in W/m^2 by multiplying negative of planetary albedo changes by downward SW radiation
#(This means positive changes mean more downward SW absorbed)
surface = -dA_a*dict2A['rsdt'] #Radiative effect of surface albedo changes
surface[dict2A['rsdt']<0.1] = 0
surface = np.ma.masked_outside(surface, -100, 100) # Ting called this "boundary for strange output"
cloud = -dA_cld*dict2A['rsdt'] #Radiative effect of cloud changes
cloud[dict2A['rsdt']<0.1] = 0
cloud = np.ma.masked_outside(cloud, -100, 100) # Ting called this "boundary for strange output"
noncloud = -dA_clr*dict2A['rsdt'] #Radiative effect of non-cloud SW changes (e.g. SW absorption)
noncloud[dict2A['rsdt']<0.1] = 0
#Broken down further into the individual terms in Eq. 16
surface_clr = -dA_a_clr*dict2A['rsdt'] #Effects of surface albedo in clear-sky conditions
surface_clr[dict2A['rsdt']<0.1] = 0
surface_oc = -dA_a_oc*dict2A['rsdt'] #Effects of surface albedo in overcast conditions
surface_oc[dict2A['rsdt']<0.1] = 0
cloud_c = -dA_c*dict2A['rsdt'] #Effects of changes in cloud fraction
cloud_c[dict2A['rsdt']<0.1] = 0
cloud_ga = -dA_ga_cld*dict2A['rsdt'] #Effects of atmospheric scattering in cloudy conditions
cloud_ga[dict2A['rsdt']<0.1] = 0
cloud_mu = -dA_mu_cld*dict2A['rsdt'] #Effects of atmospheric absorption in cloudy conditions
cloud_mu[dict2A['rsdt']<0.1] = 0
noncloud_ga = -dA_ga_clr*dict2A['rsdt'] #Effects of atmospheric scattering in clear-sky conditions
noncloud_ga[dict2A['rsdt']<0.1] = 0
noncloud_mu = -dA_mu_clr*dict2A['rsdt'] #Effects of atmospheric absorption in clear-sky conditions
noncloud_mu[dict2A['rsdt']<0.1] = 0
#Calculate more useful radiation output
CRF = dict1A['rsut'] - dict1A['rsutcs'] - dict2A['rsut'] + dict2A['rsutcs'] #Change in cloud radiative effect
cs = dict1A['rsutcs'] - dict2A['rsutcs'] #Change in clear-sky upward SW flux at TOA
#Define a dictionary to return all the variables calculated here
dictC = dict()
dictC['A1'] = A1
dictC['A2'] = A2
dictC['dA_c'] = dA_c
dictC['dA_a_clr'] = dA_a_clr
dictC['dA_a_oc'] = dA_a_oc
dictC['dA_mu_clr'] = dA_mu_clr
dictC['dA_mu_cld'] = dA_mu_cld
dictC['dA_ga_clr'] = dA_ga_clr
dictC['dA_ga_cld'] = dA_ga_cld
dictC['dA_a'] = dA_a
dictC['dA_cld'] = dA_cld
dictC['dA_clr'] = dA_clr
dictC['surface'] = surface
dictC['cloud'] = cloud
dictC['noncloud'] = noncloud
dictC['surface_clr'] = surface_clr
dictC['surface_oc'] = surface_oc
dictC['cloud_c'] = cloud_c
dictC['cloud_ga'] = cloud_ga
dictC['cloud_mu'] = cloud_mu
dictC['noncloud_ga'] = noncloud_ga
dictC['noncloud_mu'] = noncloud_mu
dictC['CRF'] = CRF
dictC['cs'] = cs
return dictC
#Function to calculate the planetary albedo, A.
#Inputs: (see Fig. 1 of Taylor et al., 2007)
# c: fraction of the region occupied by clouds
# a_clr: clear sky surface albedo (SW flux up / SW flux down)
# a_oc: overcast surface albedo
# mu_clr: clear-sky transmittance of SW radiation
# mu_cld: cloudy-sky transmittance of SW radiation
# ga_clr: clear-sky atmospheric scattering coefficient
# ga_cld: cloudy-sky atmospheric scattering coefficient
def albedo(c, a_clr, a_oc, mu_clr, mu_cld, ga_clr, ga_cld): #Labeled with equation numbers from Taylor et al. 2007
mu_oc = mu_clr*mu_cld #Eq. 14
ga_oc = 1. - (1.-ga_clr)*(1.-ga_cld) #Eq. 13
A_clr = mu_clr*ga_clr + mu_clr*a_clr*(1.-ga_clr)*(1.-ga_clr)/(1.-a_clr*ga_clr) #Eq. 7 (clear-sky)
A_oc = mu_oc*ga_oc + mu_oc*a_oc*(1.-ga_oc)*(1.-ga_oc)/(1.-a_oc*ga_oc) #Eq. 7 (overcast sky)
A = (1-c)*A_clr + c*A_oc #Eq. 15
return A
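#Sanity check: with a transparent, non-scattering atmosphere the planetary
#albedo reduces to the surface albedo, e.g.
#albedo(0.5, 0.1, 0.1, 1., 1., 0., 0.) -> 0.1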
#### Alternative versions for CESM model runs with different output variable names ####
#Alternative main function to run the different loading function
def aprp_main_cesm(dataPaths1, firstMonth1, lastMonth1, dataPaths2, firstMonth2, lastMonth2):
#Load files and run calculations for first time period
dict1A = loadNetCDF_cesm(dataPaths1, firstMonth1, lastMonth1)
dict1B = parameters(dict1A)
#Load files and run calculations for second time period
dict2A = loadNetCDF_cesm(dataPaths2, firstMonth2, lastMonth2)
dict2B = parameters(dict2A)
    #Run calculations regarding change between 2 time periods
dictC = d_albedo(dict1A, dict1B, dict2A, dict2B)
#Nest the dictionaries into an outside dictionary to return
returnDict = dict()
returnDict['APRP'] = dictC
returnDict['Time1_preliminaries'] = dict1A
returnDict['Time1_parameters'] = dict1B
returnDict['Time2_preliminaries'] = dict2A
returnDict['Time2_parameters'] = dict2B
return returnDict
#Loading function for CESM output variable names (output the same as the loadNetCDF() function)
def loadNetCDF_cesm(dataPaths, firstMonth, lastMonth):
#Variable names from CAM output (dictionary of data paths should have labels corresponding to these)
#vars_CAM = ['FSDS', 'FSNS', 'FSUTOA', 'FSNTOA', 'FSNTOAC', 'FSDSC', 'FSNSC', 'CLDTOT']
#For each variable, import the netCDF file and extract array from the netCDF Dataset object, subsetted
#...by the times specified in the arguments
Dataset = nc4.Dataset(dataPaths['FSDS'])
FSDS = Dataset.variables['FSDS'][firstMonth:lastMonth+1, :,:]
FSDS = np.ma.masked_greater(FSDS,1.e10)
Dataset = nc4.Dataset(dataPaths['FSNS'])
FSNS = Dataset.variables['FSNS'][firstMonth:lastMonth+1, :,:]
FSNS = np.ma.masked_greater(FSNS,1.e10)
Dataset = nc4.Dataset(dataPaths['FSUTOA'])
FSUTOA = Dataset.variables['FSUTOA'][firstMonth:lastMonth+1, :,:]
FSUTOA = np.ma.masked_greater(FSUTOA,1.e10)
Dataset = nc4.Dataset(dataPaths['FSNTOA'])
FSNTOA = Dataset.variables['FSNTOA'][firstMonth:lastMonth+1, :,:]
FSNTOA = np.ma.masked_greater(FSNTOA,1.e10)
Dataset = nc4.Dataset(dataPaths['FSNTOAC'])
FSNTOAC = Dataset.variables['FSNTOAC'][firstMonth:lastMonth+1, :,:]
FSNTOAC = np.ma.masked_greater(FSNTOAC,1.e10)
Dataset = nc4.Dataset(dataPaths['FSDSC'])
FSDSC = Dataset.variables['FSDSC'][firstMonth:lastMonth+1, :,:]
FSDSC = np.ma.masked_greater(FSDSC,1.e10)
Dataset = nc4.Dataset(dataPaths['FSNSC'])
FSNSC = Dataset.variables['FSNSC'][firstMonth:lastMonth+1, :,:]
FSNSC = np.ma.masked_greater(FSNSC,1.e10)
Dataset = nc4.Dataset(dataPaths['CLDTOT'])
CLDTOT = Dataset.variables['CLDTOT'][firstMonth:lastMonth+1, :,:]
CLDTOT = np.ma.masked_greater(CLDTOT,1.e10)
#Variable names from CMIP convention (used in rest of the program)
#variables = ['rsds', 'rsus', 'rsut', 'rsdt', 'rsutcs', 'rsdscs', 'rsuscs', 'clt']
#Get the variables into the CMIP format/name convention (some are already fine; some need processing)
rsds = FSDS
rsut = FSUTOA
rsdscs = FSDSC
clt = CLDTOT
rsus = FSDS - FSNS #SW: positive down: net = down - up ---> up = down - net
rsdt = FSUTOA + FSNTOA #down = net + up
rsuscs = FSDSC - FSNSC
rsutcs = rsdt - FSNTOAC #Downward SW at TOA should be same regardless of clouds.
####### from here down, same as regular loadNetCDF #######
#Obtain the latitude and longitude for the model (using last Dataset in the loop which should still be available)
lat = Dataset.variables['lat'][:]
lon = Dataset.variables['lon'][:]
#Here Ting calculated multi-year means for individual months. I need to do this too.
#Dimensions are time, lat, lon
#Need to average over every 12th time element, leave the lat and lon dependence.
#Will end up with a 3D array whose dimensions are month (1-12), lat, lon.
#What is best way to do this?
#Ting looped over the 12 months.
#She also saved separate 1-month means, but never used them so I'll skip that for now.
numMonths = lastMonth - firstMonth + 1
m_rsds = np.zeros([12,len(lat),len(lon)])
m_rsus = np.zeros([12,len(lat),len(lon)])
m_rsut = np.zeros([12,len(lat),len(lon)])
m_rsdt = np.zeros([12,len(lat),len(lon)])
m_rsutcs = np.zeros([12,len(lat),len(lon)])
m_rsdscs = np.zeros([12,len(lat),len(lon)])
m_rsuscs = np.zeros([12,len(lat),len(lon)])
m_clt = np.zeros([12,len(lat),len(lon)])
for i in range(0,12):
m_rsds[i,:,:] = np.mean(rsds[i:numMonths:12,:,:], axis=0)
m_rsus[i,:,:] = np.mean(rsus[i:numMonths:12,:,:], axis=0)
m_rsut[i,:,:] = np.mean(rsut[i:numMonths:12,:,:], axis=0)
m_rsdt[i,:,:] = np.mean(rsdt[i:numMonths:12,:,:], axis=0)
m_rsutcs[i,:,:] = np.mean(rsutcs[i:numMonths:12,:,:], axis=0)
m_rsdscs[i,:,:] = np.mean(rsdscs[i:numMonths:12,:,:], axis=0)
m_rsuscs[i,:,:] = np.mean(rsuscs[i:numMonths:12,:,:], axis=0)
m_clt[i,:,:] = np.mean(clt[i:numMonths:12,:,:], axis=0)
#Calculate the overcast versions of rsds, rsus, rsut from the clear-sky and all-sky data
#First mask zero values of cloud fraction so you don't calculate overcast values in clear-sky pixels
m_clt = np.ma.masked_values(m_clt, 0)
# c = m_clt/100. #c is cloud fraction. clt was in percentages #No-Not true in CESM output
c = m_clt
m_rsdsoc = (m_rsds-(1.-c)*(m_rsdscs))/c #Can derive this algebraically from Taylor et al., 2007, Eq. 3
m_rsusoc = (m_rsus-(1.-c)*(m_rsuscs))/c
m_rsutoc = (m_rsut-(1.-c)*(m_rsutcs))/c
#Mask zero values of the downward SW radiation (I assume this means polar night, for monthly mean)
m_rsds = np.ma.masked_values(m_rsds, 0)
m_rsdscs = np.ma.masked_values(m_rsdscs, 0)
m_rsdsoc = np.ma.masked_values(m_rsdsoc, 0)
m_rsdt = np.ma.masked_values(m_rsdt, 0)
#Return dictionary with all the variables calculated here (called "dictA" because calculated in first function called)
dictA = dict()
dictA['rsds'] = m_rsds
dictA['rsus'] = m_rsus
dictA['rsut'] = m_rsut
dictA['rsdt'] = m_rsdt
dictA['rsutcs'] = m_rsutcs
dictA['rsdscs'] = m_rsdscs
dictA['rsuscs'] = m_rsuscs
dictA['clt'] = m_clt
dictA['lat'] = lat
dictA['lon'] = lon
dictA['rsdsoc'] = m_rsdsoc
dictA['rsusoc'] = m_rsusoc
dictA['rsutoc'] = m_rsutoc
dictA['c'] = c #Cloud fraction as fraction, not %
return dictA
|
rdrussotto/pyAPRP
|
APRP.py
|
Python
|
mit
| 25,480
|
[
"NetCDF"
] |
c24da678e781e645e808195b677218d9187deaba88bb088af075b896df63b41b
|
#!/usr/bin/env python
import pysam
import numpy as np
import matplotlib.pyplot as plt
import argparse
ap = argparse.ArgumentParser(description="Plot a histogram of subreads based on alignments.")
ap.add_argument("bam", help="Alignments of reads.")
ap.add_argument("--image", help="Write an image of the historgram here.", default=None)
ap.add_argument("--table", help="Write the table of the histogram here.", default=None)
ap.add_argument("--dname", help="Dataset name.", default=None)
opts = ap.parse_args()
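# Example invocation (file names hypothetical):
#   python PlotSubreadHist.py aligned_subreads.bam --image subread_hist.png --dname CHM1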
samfile = pysam.Samfile( opts.bam, "rb" )
lengths = {}
for aln in samfile.fetch():
    # PacBio read names look like movie/holeNumber/qstart_qend;
    # aln.qname (not aln.id) is the pysam attribute that carries them
    idVals = aln.qname.split('/')
    holeNumber = idVals[1]
    l = aln.qend - aln.qstart
    # The original loop was truncated mid-edit; keeping the longest subread
    # per ZMW hole is assumed to be the intent
    if (holeNumber not in lengths) or (lengths[holeNumber] < l):
        lengths[holeNumber] = l
fig = plt.figure()
ax = plt.axes()
nplengths = np.asarray(list(lengths.values()))
n, bins, patches = ax.hist(nplengths, 50, facecolor='green', alpha=0.75, log=True)
plt.figtext(.60,.75, "Median " + str(np.median(nplengths)))
plt.figtext(.60,.70, "95th " + str(np.percentile(nplengths, 95)))
plt.figtext(.60,.65, "Mean " + str(np.mean(nplengths)))
plt.figtext(.60,.60, "Total " + str(np.sum(nplengths)))
ax.set_xlabel("Subread lengths")
ax.set_ylabel("count")
if (opts.dname is not None):
ax.set_title("Subread lengths " + opts.dname)
ax.grid(True)
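if (opts.table is not None):
    # The --table option was declared but never used in the original script;
    # as a sketch, write bin edges and counts in two columns (assumed format).
    np.savetxt(opts.table, np.column_stack((bins[:-1], n)))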
if (opts.image is not None):
    plt.savefig(opts.image)
|
yunlongliukm/chm1_scripts
|
PlotSubreadHist.py
|
Python
|
mit
| 1,458
|
[
"pysam"
] |
e05785a8c622acb71c1fe2b99ac6f24553093bbb0003ad686ac41f46fc09e565
|
import os.path
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
README_PATH = os.path.join(HERE, 'README.rst')
try:
README = open(README_PATH).read()
except IOError:
README = ''
setup(
name='rollbar',
packages=find_packages(),
version='0.5.7',
entry_points= {
'paste.filter_app_factory': [
'pyramid=rollbar.contrib.pyramid:create_rollbar_middleware'
]
},
description='Logs exceptions and other data to Rollbar. Provides a generic interface, as well as a Django middleware and a Pyramid tween.',
long_description=README,
author='Brian Rue',
author_email='brian@rollbar.com',
url='http://github.com/rollbar/pyrollbar',
classifiers=[
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Pyramid",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development",
"Topic :: Software Development :: Bug Tracking",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Quality Assurance",
],
install_requires=[
'requests',
],
)
|
Stackdriver/pyrollbar
|
setup.py
|
Python
|
mit
| 1,412
|
[
"Brian"
] |
1cea3c90a3cab7c59a8ffca290b63b26009f1346c570ccce304438d01c6f4cb2
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Wrapper classes for Cif input and output from Structures.
"""
import math
import os
import re
import textwrap
import warnings
from collections import OrderedDict, deque
from functools import partial
from inspect import getfullargspec as getargspec
from io import StringIO
from itertools import groupby
from pathlib import Path
import numpy as np
from monty.io import zopen
from monty.string import remove_non_ascii
from pymatgen.core.composition import Composition
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import MagSymmOp, SymmOp
from pymatgen.core.periodic_table import DummySpecies, Element, Species, get_el_sp
from pymatgen.core.structure import Structure
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer, SpacegroupOperations
from pymatgen.symmetry.groups import SYMM_DATA, SpaceGroup
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.util.coord import find_in_coord_list_pbc, in_coord_list_pbc
__author__ = "Shyue Ping Ong, Will Richards, Matthew Horton"
sub_spgrp = partial(re.sub, r"[\s_]", "")
space_groups = {sub_spgrp(k): k for k in SYMM_DATA["space_group_encoding"].keys()} # type: ignore
_COD_DATA = None
def _get_cod_data():
global _COD_DATA
if _COD_DATA is None:
import pymatgen
with open(os.path.join(pymatgen.symmetry.__path__[0], "symm_ops.json")) as f:
import json
_COD_DATA = json.load(f)
return _COD_DATA
class CifBlock:
"""
Object for storing cif data. All data is stored in a single dictionary.
Data inside loops are stored in lists in the data dictionary, and
    information on which keys are grouped together is stored in the loops
    attribute.
"""
maxlen = 70 # not quite 80 so we can deal with semicolons and things
def __init__(self, data, loops, header):
"""
Args:
data: dict or OrderedDict of data to go into the cif. Values should
be convertible to string, or lists of these if the key is
in a loop
loops: list of lists of keys, grouped by which loop they should
appear in
header: name of the block (appears after the data_ on the first
line)
"""
self.loops = loops
self.data = data
# AJ says: CIF Block names cannot be more than 75 characters or you
# get an Exception
self.header = header[:74]
def __eq__(self, other):
return self.loops == other.loops and self.data == other.data and self.header == other.header
def __getitem__(self, key):
return self.data[key]
def __str__(self):
"""
Returns the cif string for the data block
"""
s = [f"data_{self.header}"]
keys = self.data.keys()
written = []
for k in keys:
if k in written:
continue
for l in self.loops:
# search for a corresponding loop
if k in l:
s.append(self._loop_to_string(l))
written.extend(l)
break
if k not in written:
# k didn't belong to a loop
v = self._format_field(self.data[k])
if len(k) + len(v) + 3 < self.maxlen:
s.append(f"{k} {v}")
else:
s.extend([k, v])
return "\n".join(s)
def _loop_to_string(self, loop):
s = "loop_"
for l in loop:
s += "\n " + l
for fields in zip(*[self.data[k] for k in loop]):
line = "\n"
for val in map(self._format_field, fields):
if val[0] == ";":
s += line + "\n" + val
line = "\n"
elif len(line) + len(val) + 2 < self.maxlen:
line += " " + val
else:
s += line
line = "\n " + val
s += line
return s
def _format_field(self, v):
        v = str(v).strip()
if len(v) > self.maxlen:
return ";\n" + textwrap.fill(v, self.maxlen) + "\n;"
# add quotes if necessary
if v == "":
return '""'
if (" " in v or v[0] == "_") and not (v[0] == "'" and v[-1] == "'") and not (v[0] == '"' and v[-1] == '"'):
if "'" in v:
q = '"'
else:
q = "'"
v = q + v + q
return v
@classmethod
def _process_string(cls, string):
# remove comments
string = re.sub(r"(\s|^)#.*$", "", string, flags=re.MULTILINE)
# remove empty lines
string = re.sub(r"^\s*\n", "", string, flags=re.MULTILINE)
# remove non_ascii
string = remove_non_ascii(string)
# since line breaks in .cif files are mostly meaningless,
# break up into a stream of tokens to parse, rejoining multiline
# strings (between semicolons)
q = deque()
multiline = False
ml = []
# this regex splits on spaces, except when in quotes.
# starting quotes must not be preceded by non-whitespace
# (these get eaten by the first expression)
# ending quotes must not be followed by non-whitespace
p = re.compile(r"""([^'"\s][\S]*)|'(.*?)'(?!\S)|"(.*?)"(?!\S)""")
for l in string.splitlines():
if multiline:
if l.startswith(";"):
multiline = False
q.append(("", "", "", " ".join(ml)))
ml = []
l = l[1:].strip()
else:
ml.append(l)
continue
if l.startswith(";"):
multiline = True
ml.append(l[1:].strip())
else:
for s in p.findall(l):
# s is tuple. location of the data in the tuple
# depends on whether it was quoted in the input
q.append(s)
return q
@classmethod
def from_string(cls, string):
"""
Reads CifBlock from string.
:param string: String representation.
:return: CifBlock
"""
q = cls._process_string(string)
header = q.popleft()[0][5:]
data = OrderedDict()
loops = []
while q:
s = q.popleft()
# cif keys aren't in quotes, so show up in s[0]
if s[0] == "_eof":
break
if s[0].startswith("_"):
try:
data[s[0]] = "".join(q.popleft())
except IndexError:
data[s[0]] = ""
elif s[0].startswith("loop_"):
columns = []
items = []
while q:
s = q[0]
if s[0].startswith("loop_") or not s[0].startswith("_"):
break
columns.append("".join(q.popleft()))
data[columns[-1]] = []
while q:
s = q[0]
if s[0].startswith("loop_") or s[0].startswith("_"):
break
items.append("".join(q.popleft()))
                n = len(items) // len(columns)
                assert len(items) % len(columns) == 0
loops.append(columns)
for k, v in zip(columns * n, items):
data[k].append(v.strip())
elif "".join(s).strip() != "":
warnings.warn("Possible issue in cif file at line: {}".format("".join(s).strip()))
return cls(data, loops, header)
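# Illustrative sketch (kept as a comment so it is not executed on import; the
# CIF fragment is hypothetical): keys outside loops map to scalar strings,
# looped keys map to parallel lists.
#
#     block = CifBlock.from_string(
#         "data_example\n"
#         "_cell_length_a   4.05\n"
#         "loop_\n"
#         " _atom_site_label\n"
#         " _atom_site_occupancy\n"
#         " Al1 1.0\n"
#     )
#     block["_cell_length_a"]    # -> "4.05"
#     block["_atom_site_label"]  # -> ["Al1"]
#     str(block)                 # round-trips, starting with "data_example"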
class CifFile:
"""
Reads and parses CifBlocks from a .cif file or string
"""
def __init__(self, data, orig_string=None, comment=None):
"""
Args:
            data (OrderedDict): OrderedDict of CifBlock objects.
orig_string (str): The original cif string.
comment (str): Comment string.
"""
self.data = data
self.orig_string = orig_string
self.comment = comment or "# generated using pymatgen"
def __str__(self):
s = ["%s" % v for v in self.data.values()]
return self.comment + "\n" + "\n".join(s) + "\n"
@classmethod
def from_string(cls, string):
"""
Reads CifFile from a string.
:param string: String representation.
:return: CifFile
"""
d = OrderedDict()
for x in re.split(r"^\s*data_", "x\n" + string, flags=re.MULTILINE | re.DOTALL)[1:]:
# Skip over Cif block that contains powder diffraction data.
# Some elements in this block were missing from CIF files in
# Springer materials/Pauling file DBs.
# This block anyway does not contain any structure information, and
# CifParser was also not parsing it.
if "powder_pattern" in re.split(r"\n", x, 1)[0]:
continue
c = CifBlock.from_string("data_" + x)
d[c.header] = c
return cls(d, string)
@classmethod
def from_file(cls, filename):
"""
Reads CifFile from a filename.
:param filename: Filename
:return: CifFile
"""
with zopen(str(filename), "rt", errors="replace") as f:
return cls.from_string(f.read())
class CifParser:
"""
    Parses a CIF file. Attempts to fix CIFs that are out-of-spec, but will
    issue warnings if corrections are applied. These warnings are also stored
    in the CifParser's warnings attribute.
"""
def __init__(self, filename, occupancy_tolerance=1.0, site_tolerance=1e-4):
"""
Args:
filename (str): CIF filename, bzipped or gzipped CIF files are fine too.
occupancy_tolerance (float): If total occupancy of a site is between 1
and occupancy_tolerance, the occupancies will be scaled down to 1.
site_tolerance (float): This tolerance is used to determine if two
sites are sitting in the same position, in which case they will be
combined to a single disordered site. Defaults to 1e-4.
"""
self._occupancy_tolerance = occupancy_tolerance
self._site_tolerance = site_tolerance
if isinstance(filename, (str, Path)):
self._cif = CifFile.from_file(filename)
else:
self._cif = CifFile.from_string(filename.read())
# store if CIF contains features from non-core CIF dictionaries
# e.g. magCIF
self.feature_flags = {}
self.warnings = []
def is_magcif():
"""
Checks to see if file appears to be a magCIF file (heuristic).
"""
# Doesn't seem to be a canonical way to test if file is magCIF or
# not, so instead check for magnetic symmetry datanames
prefixes = [
"_space_group_magn",
"_atom_site_moment",
"_space_group_symop_magn",
]
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags["magcif"] = is_magcif()
def is_magcif_incommensurate():
"""
Checks to see if file contains an incommensurate magnetic
structure (heuristic).
"""
            # Doesn't seem to be a canonical way to test if a magCIF file
            # describes an incommensurate structure or not, so instead check
            # for common datanames
if not self.feature_flags["magcif"]:
return False
prefixes = ["_cell_modulation_dimension", "_cell_wave_vector"]
for d in self._cif.data.values():
for k in d.data.keys():
for prefix in prefixes:
if prefix in k:
return True
return False
self.feature_flags["magcif_incommensurate"] = is_magcif_incommensurate()
for k in self._cif.data.keys():
# pass individual CifBlocks to _sanitize_data
self._cif.data[k] = self._sanitize_data(self._cif.data[k])
@staticmethod
def from_string(cif_string, occupancy_tolerance=1.0):
"""
Creates a CifParser from a string.
Args:
cif_string (str): String representation of a CIF.
occupancy_tolerance (float): If total occupancy of a site is
between 1 and occupancy_tolerance, the occupancies will be
scaled down to 1.
Returns:
CifParser
"""
stream = StringIO(cif_string)
return CifParser(stream, occupancy_tolerance)
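    # Illustrative sketch (comment only; `cif_text` is a hypothetical string
    # holding the contents of a CIF file):
    #
    #     parser = CifParser.from_string(cif_text)
    #     structures = parser.get_structures()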
def _sanitize_data(self, data):
"""
        Some CIF files do not conform to spec. This function corrects
        known issues, particularly with regard to Springer Materials/
        Pauling File databases.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
"""
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
"""
# check for implicit hydrogens, warn if any present
if "_atom_site_attached_hydrogens" in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data["_atom_site_attached_hydrogens"] if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.warnings.append(
"Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added."
)
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if "_atom_site_type_symbol" in data.data.keys():
            # Keep track of which data rows need to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for idx, el_row in enumerate(data["_atom_site_label"]):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data["_atom_site_type_symbol"][idx].split(" + ")) > len(
data["_atom_site_label"][idx].split(" + ")
):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data["_atom_site_type_symbol"][idx]
symbol_str_lst = symbol_str.split(" + ")
for elocc_idx, sym in enumerate(symbol_str_lst):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub(r"\([0-9]*\)", "", sym.strip())
# Extract element name and its occupancy from the
# string, and store it as a
# key-value pair in "els_occ".
els_occu[
str(re.findall(r"\D+", symbol_str_lst[elocc_idx].strip())[1]).replace("<sup>", "")
] = float("0" + re.findall(r"\.?\d+", symbol_str_lst[elocc_idx].strip())[1])
x = str2float(data["_atom_site_fract_x"][idx])
y = str2float(data["_atom_site_fract_y"][idx])
z = str2float(data["_atom_site_fract_z"][idx])
for et, occu in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(et + "_fix" + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z))
idxs_to_remove.append(idx)
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
for id in sorted(idxs_to_remove, reverse=True):
del data.data[original_key][id]
if len(idxs_to_remove) > 0:
self.warnings.append("Pauling file corrections applied.")
data.data["_atom_site_label"] += new_atom_site_label
data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
data.data["_atom_site_occupancy"] += new_atom_site_occupancy
data.data["_atom_site_fract_x"] += new_fract_x
data.data["_atom_site_fract_y"] += new_fract_y
data.data["_atom_site_fract_z"] += new_fract_z
"""
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
"""
if self.feature_flags["magcif"]:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = [
"_space_group_symop_magn_operation.xyz",
"_space_group_symop_magn_centering.xyz",
"_space_group_magn.name_BNS",
"_space_group_magn.number_BNS",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
"_atom_site_moment_label",
]
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = "_".join(correct_key.split("."))
test_key = "_".join(original_key.split("."))
if trial_key == test_key:
changes_to_make[correct_key] = original_key
# make changes
for correct_key, original_key in changes_to_make.items():
data.data[correct_key] = data.data[original_key]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {
"_magnetic_space_group.transform_to_standard_Pp_abc": "_space_group_magn.transform_BNS_Pp_abc"
}
changes_to_make = {}
for interim_key, final_key in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key
if len(changes_to_make) > 0:
self.warnings.append("Keys changed to match new magCIF specification.")
for final_key, interim_key in changes_to_make.items():
data.data[final_key] = data.data[interim_key]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1 / 3.0, 2 / 3.0)
fracs_to_change = {}
for label in ("_atom_site_fract_x", "_atom_site_fract_y", "_atom_site_fract_z"):
if label in data.data.keys():
for idx, frac in enumerate(data.data[label]):
try:
frac = str2float(frac)
except Exception:
# co-ordinate might not be defined e.g. '?'
continue
for comparison_frac in important_fracs:
if abs(1 - frac / comparison_frac) < 1e-4:
fracs_to_change[(label, idx)] = str(comparison_frac)
if fracs_to_change:
self.warnings.append(
"Some fractional co-ordinates rounded to ideal values to avoid issues with finite precision."
)
for (label, idx), val in fracs_to_change.items():
data.data[label][idx] = val
return data
def _unique_coords(self, coords_in, magmoms_in=None, lattice=None):
"""
Generate unique coordinates using coord and symmetry positions
and also their corresponding magnetic moments, if supplied.
"""
coords = []
if magmoms_in:
magmoms = []
            if len(magmoms_in) != len(coords_in):
                raise ValueError("Number of magmoms does not match number of coordinates.")
for tmp_coord, tmp_magmom in zip(coords_in, magmoms_in):
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if isinstance(op, MagSymmOp):
# Up to this point, magmoms have been defined relative
# to crystal axis. Now convert to Cartesian and into
# a Magmom object.
magmom = Magmom.from_moment_relative_to_crystal_axes(
op.operate_magmom(tmp_magmom), lattice=lattice
)
else:
magmom = Magmom(tmp_magmom)
if not in_coord_list_pbc(coords, coord, atol=self._site_tolerance):
coords.append(coord)
magmoms.append(magmom)
return coords, magmoms
for tmp_coord in coords_in:
for op in self.symmetry_operations:
coord = op.operate(tmp_coord)
coord = np.array([i - math.floor(i) for i in coord])
if not in_coord_list_pbc(coords, coord, atol=self._site_tolerance):
coords.append(coord)
return coords, [Magmom(0)] * len(coords) # return dummy magmoms
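    # Worked example for the wrapping step above: symmetry operations can move
    # a fractional coordinate outside [0, 1), so each component is mapped back
    # via i - math.floor(i), e.g. 1.25 -> 0.25 and -0.25 -> 0.75; duplicates
    # within self._site_tolerance are then filtered by in_coord_list_pbc.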
def get_lattice(
self,
data,
length_strings=("a", "b", "c"),
angle_strings=("alpha", "beta", "gamma"),
lattice_type=None,
):
"""
Generate the lattice from the provided lattice parameters. In
the absence of all six lattice parameters, the crystal system
and necessary parameters are parsed
"""
try:
lengths = [str2float(data["_cell_length_" + i]) for i in length_strings]
angles = [str2float(data["_cell_angle_" + i]) for i in angle_strings]
if not lattice_type:
return Lattice.from_parameters(*lengths, *angles)
return getattr(Lattice, lattice_type)(*(lengths + angles))
except KeyError:
# Missing Key search for cell setting
            for lattice_label in [
                "_symmetry_cell_setting",
                "_space_group_crystal_system",
            ]:
                if data.data.get(lattice_label):
                    lattice_type = data.data.get(lattice_label).lower()
try:
required_args = getargspec(getattr(Lattice, lattice_type)).args
lengths = (l for l in length_strings if l in required_args)
angles = (a for a in angle_strings if a in required_args)
return self.get_lattice(data, lengths, angles, lattice_type=lattice_type)
except AttributeError as exc:
self.warnings.append(str(exc))
                        warnings.warn(str(exc))
else:
return None
return None
def get_symops(self, data):
"""
In order to generate symmetry equivalent positions, the symmetry
operations are parsed. If the symops are not present, the space
group symbol is parsed, and symops are generated.
"""
symops = []
for symmetry_label in [
"_symmetry_equiv_pos_as_xyz",
"_symmetry_equiv_pos_as_xyz_",
"_space_group_symop_operation_xyz",
"_space_group_symop_operation_xyz_",
]:
if data.data.get(symmetry_label):
xyz = data.data.get(symmetry_label)
if isinstance(xyz, str):
msg = "A 1-line symmetry op P1 CIF is detected!"
warnings.warn(msg)
self.warnings.append(msg)
xyz = [xyz]
try:
symops = [SymmOp.from_xyz_string(s) for s in xyz]
break
except ValueError:
continue
if not symops:
# Try to parse symbol
for symmetry_label in [
"_symmetry_space_group_name_H-M",
"_symmetry_space_group_name_H_M",
"_symmetry_space_group_name_H-M_",
"_symmetry_space_group_name_H_M_",
"_space_group_name_Hall",
"_space_group_name_Hall_",
"_space_group_name_H-M_alt",
"_space_group_name_H-M_alt_",
"_symmetry_space_group_name_hall",
"_symmetry_space_group_name_hall_",
"_symmetry_space_group_name_h-m",
"_symmetry_space_group_name_h-m_",
]:
sg = data.data.get(symmetry_label)
if sg:
sg = sub_spgrp(sg)
try:
spg = space_groups.get(sg)
if spg:
symops = SpaceGroup(spg).symmetry_ops
msg = (
"No _symmetry_equiv_pos_as_xyz type key found. "
"Spacegroup from %s used." % symmetry_label
)
warnings.warn(msg)
self.warnings.append(msg)
break
except ValueError:
# Ignore any errors
pass
try:
for d in _get_cod_data():
if sg == re.sub(r"\s+", "", d["hermann_mauguin"]):
xyz = d["symops"]
symops = [SymmOp.from_xyz_string(s) for s in xyz]
msg = (
"No _symmetry_equiv_pos_as_xyz type key found. "
"Spacegroup from %s used." % symmetry_label
)
warnings.warn(msg)
self.warnings.append(msg)
break
except Exception:
continue
if symops:
break
if not symops:
# Try to parse International number
for symmetry_label in [
"_space_group_IT_number",
"_space_group_IT_number_",
"_symmetry_Int_Tables_number",
"_symmetry_Int_Tables_number_",
]:
if data.data.get(symmetry_label):
try:
i = int(str2float(data.data.get(symmetry_label)))
symops = SpaceGroup.from_int_number(i).symmetry_ops
break
except ValueError:
continue
if not symops:
msg = "No _symmetry_equiv_pos_as_xyz type key found. Defaulting to P1."
warnings.warn(msg)
self.warnings.append(msg)
symops = [SymmOp.from_xyz_string(s) for s in ["x", "y", "z"]]
return symops
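    # Illustrative sketch (comment only): the strings parsed above are plain
    # xyz triplets, e.g.
    #
    #     op = SymmOp.from_xyz_string("-y, x-y, z+1/3")
    #     op.operate([0.1, 0.2, 0.3])  # -> symmetry-equivalent fractional coord
    #
    # which is also how the P1 fallback ["x", "y", "z"] is materialized.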
def get_magsymops(self, data):
"""
Equivalent to get_symops except for magnetic symmetry groups.
Separate function since additional operation for time reversal symmetry
(which changes magnetic moments on sites) needs to be returned.
"""
magsymmops = []
# check to see if magCIF file explicitly contains magnetic symmetry operations
if data.data.get("_space_group_symop_magn_operation.xyz"):
xyzt = data.data.get("_space_group_symop_magn_operation.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
magsymmops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
if data.data.get("_space_group_symop_magn_centering.xyz"):
xyzt = data.data.get("_space_group_symop_magn_centering.xyz")
if isinstance(xyzt, str):
xyzt = [xyzt]
centering_symops = [MagSymmOp.from_xyzt_string(s) for s in xyzt]
all_ops = []
for op in magsymmops:
for centering_op in centering_symops:
new_translation = [
i - np.floor(i) for i in op.translation_vector + centering_op.translation_vector
]
new_time_reversal = op.time_reversal * centering_op.time_reversal
all_ops.append(
MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=op.rotation_matrix,
translation_vec=new_translation,
time_reversal=new_time_reversal,
)
)
magsymmops = all_ops
# else check to see if it specifies a magnetic space group
elif data.data.get("_space_group_magn.name_BNS") or data.data.get("_space_group_magn.number_BNS"):
if data.data.get("_space_group_magn.name_BNS"):
# get BNS label for MagneticSpaceGroup()
id = data.data.get("_space_group_magn.name_BNS")
else:
# get BNS number for MagneticSpaceGroup()
# by converting string to list of ints
id = list(map(int, (data.data.get("_space_group_magn.number_BNS").split("."))))
if data.data.get("_space_group_magn.transform_BNS_Pp_abc"):
if data.data.get("_space_group_magn.transform_BNS_Pp_abc") != "a,b,c;0,0,0":
jf = data.data.get("_space_group_magn.transform_BNS_Pp_abc")
msg = MagneticSpaceGroup(id, jf)
elif data.data.get("_space_group_magn.transform_BNS_Pp"):
return NotImplementedError("Incomplete specification to implement.")
else:
msg = MagneticSpaceGroup(id)
magsymmops = msg.symmetry_ops
if not magsymmops:
msg = "No magnetic symmetry detected, using primitive symmetry."
warnings.warn(msg)
self.warnings.append(msg)
magsymmops = [MagSymmOp.from_xyzt_string("x, y, z, 1")]
return magsymmops
@staticmethod
def parse_oxi_states(data):
"""
Parse oxidation states from data dictionary
"""
try:
oxi_states = {
data["_atom_type_symbol"][i]: str2float(data["_atom_type_oxidation_number"][i])
for i in range(len(data["_atom_type_symbol"]))
}
# attempt to strip oxidation state from _atom_type_symbol
# in case the label does not contain an oxidation state
for i, symbol in enumerate(data["_atom_type_symbol"]):
oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = str2float(data["_atom_type_oxidation_number"][i])
except (ValueError, KeyError):
oxi_states = None
return oxi_states
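    # Illustrative sketch (hypothetical data): given
    #     _atom_type_symbol           = ["Fe3+", "O2-"]
    #     _atom_type_oxidation_number = ["3", "-2"]
    # parse_oxi_states returns both raw and stripped symbols:
    #     {"Fe3+": 3.0, "Fe": 3.0, "O2-": -2.0, "O": -2.0}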
@staticmethod
def parse_magmoms(data, lattice=None):
"""
Parse atomic magnetic moments from data dictionary
"""
if lattice is None:
raise Exception("Magmoms given in terms of crystal axes in magCIF spec.")
try:
magmoms = {
data["_atom_site_moment_label"][i]: np.array(
[
str2float(data["_atom_site_moment_crystalaxis_x"][i]),
str2float(data["_atom_site_moment_crystalaxis_y"][i]),
str2float(data["_atom_site_moment_crystalaxis_z"][i]),
]
)
for i in range(len(data["_atom_site_moment_label"]))
}
except (ValueError, KeyError):
return None
return magmoms
def _parse_symbol(self, sym):
"""
Parse a string with a symbol to extract a string representing an element.
Args:
sym (str): A symbol to be parsed.
Returns:
A string with the parsed symbol. None if no parsing was possible.
"""
# Common representations for elements/water in cif files
# TODO: fix inconsistent handling of water
special = {
"Hw": "H",
"Ow": "O",
"Wat": "O",
"wat": "O",
"OH": "",
"OH2": "",
"NO3": "N",
}
parsed_sym = None
# try with special symbols, otherwise check the first two letters,
# then the first letter alone. If everything fails try extracting the
# first letters.
m_sp = re.match("|".join(special.keys()), sym)
if m_sp:
parsed_sym = special[m_sp.group()]
elif Element.is_valid_symbol(sym[:2].title()):
parsed_sym = sym[:2].title()
elif Element.is_valid_symbol(sym[0].upper()):
parsed_sym = sym[0].upper()
else:
m = re.match(r"w?[A-Z][a-z]*", sym)
if m:
parsed_sym = m.group()
if parsed_sym is not None and (m_sp or not re.match(fr"{parsed_sym}\d*", sym)):
msg = f"{sym} parsed as {parsed_sym}"
warnings.warn(msg)
self.warnings.append(msg)
return parsed_sym
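    # Illustrative examples of the rules above (hypothetical inputs):
    #     "Fe2+" -> "Fe"  (first two letters form a valid element)
    #     "wat"  -> "O"   (special water token; a warning is issued)
    #     "OH2"  -> ""    (special token mapped to empty string; such sites
    #                      are skipped by _get_structure)
    #     "N5"   -> "N"   (single-letter fallback)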
def _get_structure(self, data, primitive, symmetrized):
"""
Generate structure from part of the cif.
"""
def get_num_implicit_hydrogens(sym):
num_h = {"Wat": 2, "wat": 2, "O-H": 1}
return num_h.get(sym[:3], 0)
lattice = self.get_lattice(data)
# if magCIF, get magnetic symmetry moments and magmoms
# else standard CIF, and use empty magmom dict
if self.feature_flags["magcif_incommensurate"]:
raise NotImplementedError("Incommensurate structures not currently supported.")
if self.feature_flags["magcif"]:
self.symmetry_operations = self.get_magsymops(data)
magmoms = self.parse_magmoms(data, lattice=lattice)
else:
self.symmetry_operations = self.get_symops(data)
magmoms = {}
oxi_states = self.parse_oxi_states(data)
coord_to_species = OrderedDict()
coord_to_magmoms = OrderedDict()
def get_matching_coord(coord):
keys = list(coord_to_species.keys())
coords = np.array(keys)
for op in self.symmetry_operations:
c = op.operate(coord)
inds = find_in_coord_list_pbc(coords, c, atol=self._site_tolerance)
                # can't use "if inds" directly, because np.array([0])
                # evaluates to False
if len(inds):
return keys[inds[0]]
return False
for i in range(len(data["_atom_site_label"])):
try:
# If site type symbol exists, use it. Otherwise, we use the
# label.
symbol = self._parse_symbol(data["_atom_site_type_symbol"][i])
num_h = get_num_implicit_hydrogens(data["_atom_site_type_symbol"][i])
except KeyError:
symbol = self._parse_symbol(data["_atom_site_label"][i])
num_h = get_num_implicit_hydrogens(data["_atom_site_label"][i])
if not symbol:
continue
if oxi_states is not None:
o_s = oxi_states.get(symbol, 0)
# use _atom_site_type_symbol if possible for oxidation state
if "_atom_site_type_symbol" in data.data.keys():
oxi_symbol = data["_atom_site_type_symbol"][i]
o_s = oxi_states.get(oxi_symbol, o_s)
try:
el = Species(symbol, o_s)
except Exception:
el = DummySpecies(symbol, o_s)
else:
el = get_el_sp(symbol)
x = str2float(data["_atom_site_fract_x"][i])
y = str2float(data["_atom_site_fract_y"][i])
z = str2float(data["_atom_site_fract_z"][i])
magmom = magmoms.get(data["_atom_site_label"][i], np.array([0, 0, 0]))
try:
occu = str2float(data["_atom_site_occupancy"][i])
except (KeyError, ValueError):
occu = 1
if occu > 0:
coord = (x, y, z)
match = get_matching_coord(coord)
comp_d = {el: occu}
if num_h > 0:
comp_d["H"] = num_h
self.warnings.append(
"Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added."
)
comp = Composition(comp_d)
if not match:
coord_to_species[coord] = comp
coord_to_magmoms[coord] = magmom
else:
coord_to_species[match] += comp
# disordered magnetic not currently supported
coord_to_magmoms[match] = None
sum_occu = [
sum(c.values()) for c in coord_to_species.values() if not set(c.elements) == {Element("O"), Element("H")}
]
if any(o > 1 for o in sum_occu):
msg = (
"Some occupancies ({}) sum to > 1! If they are within "
"the occupancy_tolerance, they will be rescaled. "
"The current occupancy_tolerance is set to: {}".format(sum_occu, self._occupancy_tolerance)
)
warnings.warn(msg)
self.warnings.append(msg)
allspecies = []
allcoords = []
allmagmoms = []
allhydrogens = []
equivalent_indices = []
# check to see if magCIF file is disordered
if self.feature_flags["magcif"]:
for k, v in coord_to_magmoms.items():
if v is None:
                    # A proposed solution is to instead store magnetic
                    # moments as a Species "spin" property rather than a site
                    # property, but this introduces ambiguities for the end
                    # user (such as unintended use of `spin`, and Species
                    # having a fictitious oxidation state).
raise NotImplementedError("Disordered magnetic structures not currently supported.")
if coord_to_species.items():
for idx, (comp, group) in enumerate(
groupby(
sorted(list(coord_to_species.items()), key=lambda x: x[1]),
key=lambda x: x[1],
)
):
tmp_coords = [site[0] for site in group]
tmp_magmom = [coord_to_magmoms[tmp_coord] for tmp_coord in tmp_coords]
if self.feature_flags["magcif"]:
coords, magmoms = self._unique_coords(tmp_coords, magmoms_in=tmp_magmom, lattice=lattice)
else:
coords, magmoms = self._unique_coords(tmp_coords)
if set(comp.elements) == {Element("O"), Element("H")}:
# O with implicit hydrogens
im_h = comp["H"]
species = Composition({"O": comp["O"]})
else:
im_h = 0
species = comp
                # The following might be a more natural representation of
                # equivalent indices, but is not in the format expected by
                # SymmetrizedStructure:
                # equivalent_indices.append(list(range(len(allcoords), len(coords)+len(allcoords))))
                # The above gives a list like:
                # [[0, 1, 2, 3], [4, 5, 6, 7, 8, 9, 10, 11]] where the
                # integers are site indices, whereas the version used below
                # gives a list in the same order as the sites, e.g.
                # [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]
                # where sites sharing the same integer are equivalent.
equivalent_indices += len(coords) * [idx]
allhydrogens.extend(len(coords) * [im_h])
allcoords.extend(coords)
allspecies.extend(len(coords) * [species])
allmagmoms.extend(magmoms)
# rescale occupancies if necessary
for i, species in enumerate(allspecies):
totaloccu = sum(species.values())
if 1 < totaloccu <= self._occupancy_tolerance:
allspecies[i] = species / totaloccu
if allspecies and len(allspecies) == len(allcoords) and len(allspecies) == len(allmagmoms):
site_properties = {}
if any(allhydrogens):
assert len(allhydrogens) == len(allcoords)
site_properties["implicit_hydrogens"] = allhydrogens
if self.feature_flags["magcif"]:
site_properties["magmom"] = allmagmoms
if len(site_properties) == 0:
site_properties = None
struct = Structure(lattice, allspecies, allcoords, site_properties=site_properties)
if symmetrized:
# Wyckoff labels not currently parsed, note that not all CIFs will contain Wyckoff labels
# TODO: extract Wyckoff labels (or other CIF attributes) and include as site_properties
wyckoffs = ["Not Parsed"] * len(struct)
# Names of space groups are likewise not parsed (again, not all CIFs will contain this information)
# What is stored are the lists of symmetry operations used to generate the structure
# TODO: ensure space group labels are stored if present
sg = SpacegroupOperations("Not Parsed", -1, self.symmetry_operations)
return SymmetrizedStructure(struct, sg, equivalent_indices, wyckoffs)
struct = struct.get_sorted_structure()
if primitive and self.feature_flags["magcif"]:
struct = struct.get_primitive_structure(use_site_props=True)
elif primitive:
struct = struct.get_primitive_structure()
struct = struct.get_reduced_structure()
return struct
def get_structures(self, primitive=True, symmetrized=False):
"""
Return list of structures in CIF file. primitive boolean sets whether a
conventional cell structure or primitive cell structure is returned.
Args:
primitive (bool): Set to False to return conventional unit cells.
Defaults to True. With magnetic CIF files, will return primitive
magnetic cell which may be larger than nuclear primitive cell.
symmetrized (bool): If True, return a SymmetrizedStructure which will
include the equivalent indices and symmetry operations used to
create the Structure as provided by the CIF (if explicit symmetry
operations are included in the CIF) or generated from information
in the CIF (if only space group labels are provided). Note that
currently Wyckoff labels and space group labels or numbers are
not included in the generated SymmetrizedStructure, these will be
notated as "Not Parsed" or -1 respectively.
Returns:
List of Structures.
"""
if primitive and symmetrized:
raise ValueError(
"Using both 'primitive' and 'symmetrized' arguments is not currently supported "
"since unexpected behavior might result."
)
structures = []
for i, d in enumerate(self._cif.data.values()):
try:
s = self._get_structure(d, primitive, symmetrized)
if s:
structures.append(s)
except (KeyError, ValueError) as exc:
# Warn the user (Errors should never pass silently)
# A user reported a problem with cif files produced by Avogadro
# in which the atomic coordinates are in Cartesian coords.
self.warnings.append(str(exc))
warnings.warn("No structure parsed for %d structure in CIF. Section of CIF file below." % (i + 1))
warnings.warn(str(d))
warnings.warn("Error is %s." % str(exc))
if self.warnings:
warnings.warn("Issues encountered while parsing CIF: %s" % "\n".join(self.warnings))
if len(structures) == 0:
raise ValueError("Invalid cif file with no structures!")
return structures
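    # Illustrative sketch (comment only; the filename is hypothetical):
    #
    #     parser = CifParser("quartz.cif")
    #     structs = parser.get_structures(primitive=False)
    #     if parser.has_errors:
    #         print(parser.warnings)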
    def get_bibtex_string(self):
        """
        Get BibTeX reference from CIF file.
        :return: BibTeX string
        """
try:
from pybtex.database import BibliographyData, Entry
except ImportError:
raise RuntimeError("Bibliographic data extraction requires pybtex.")
bibtex_keys = {
"author": ("_publ_author_name", "_citation_author_name"),
"title": ("_publ_section_title", "_citation_title"),
"journal": (
"_journal_name_full",
"_journal_name_abbrev",
"_citation_journal_full",
"_citation_journal_abbrev",
),
"volume": ("_journal_volume", "_citation_journal_volume"),
"year": ("_journal_year", "_citation_year"),
"number": ("_journal_number", "_citation_number"),
"page_first": ("_journal_page_first", "_citation_page_first"),
"page_last": ("_journal_page_last", "_citation_page_last"),
"doi": ("_journal_DOI", "_citation_DOI"),
}
entries = {}
# TODO: parse '_publ_section_references' when it exists?
# TODO: CIF specification supports multiple citations.
for idx, data in enumerate(self._cif.data.values()):
# convert to lower-case keys, some cif files inconsistent
data = {k.lower(): v for k, v in data.data.items()}
bibtex_entry = {}
for field, tags in bibtex_keys.items():
for tag in tags:
if tag in data:
if isinstance(data[tag], list):
bibtex_entry[field] = data[tag][0]
else:
bibtex_entry[field] = data[tag]
# convert to bibtex author format ('and' delimited)
if "author" in bibtex_entry:
# separate out semicolon authors
if isinstance(bibtex_entry["author"], str):
if ";" in bibtex_entry["author"]:
bibtex_entry["author"] = bibtex_entry["author"].split(";")
if isinstance(bibtex_entry["author"], list):
bibtex_entry["author"] = " and ".join(bibtex_entry["author"])
# convert to bibtex page range format, use empty string if not specified
if ("page_first" in bibtex_entry) or ("page_last" in bibtex_entry):
bibtex_entry["pages"] = "{}--{}".format(
bibtex_entry.get("page_first", ""),
bibtex_entry.get("page_last", ""),
)
bibtex_entry.pop("page_first", None) # and remove page_first, page_list if present
bibtex_entry.pop("page_last", None)
# cite keys are given as cif-reference-idx in order they are found
entries[f"cifref{idx}"] = Entry("article", list(bibtex_entry.items()))
return BibliographyData(entries).to_string(bib_format="bibtex")
def as_dict(self):
"""
:return: MSONable dict
"""
d = OrderedDict()
for k, v in self._cif.data.items():
d[k] = {}
for k2, v2 in v.data.items():
d[k][k2] = v2
return d
@property
def has_errors(self):
"""
:return: Whether there are errors/warnings detected in CIF parsing.
"""
return len(self.warnings) > 0
class CifWriter:
"""
A wrapper around CifFile to write CIF files from pymatgen structures.
"""
def __init__(
self,
struct,
symprec=None,
write_magmoms=False,
significant_figures=8,
angle_tolerance=5.0,
refine_struct=True,
):
"""
Args:
struct (Structure): structure to write
            symprec (float): If not None, finds the symmetry of the structure
and writes the cif with symmetry information. Passes symprec
to the SpacegroupAnalyzer. See also refine_struct.
write_magmoms (bool): If True, will write magCIF file. Incompatible
with symprec
significant_figures (int): Specifies precision for formatting of floats.
Defaults to 8.
angle_tolerance (float): Angle tolerance for symmetry finding. Passes
angle_tolerance to the SpacegroupAnalyzer. Used only if symprec
is not None.
refine_struct: Used only if symprec is not None. If True, get_refined_structure
is invoked to convert input structure from primitive to conventional.
"""
if write_magmoms and symprec:
warnings.warn("Magnetic symmetry cannot currently be detected by pymatgen,disabling symmetry detection.")
symprec = None
format_str = "{:.%df}" % significant_figures
block = OrderedDict()
loops = []
spacegroup = ("P 1", 1)
if symprec is not None:
sf = SpacegroupAnalyzer(struct, symprec, angle_tolerance=angle_tolerance)
spacegroup = (sf.get_space_group_symbol(), sf.get_space_group_number())
if refine_struct:
# Needs the refined structure when using symprec. This converts
# primitive to conventional structures, the standard for CIF.
struct = sf.get_refined_structure()
latt = struct.lattice
comp = struct.composition
no_oxi_comp = comp.element_composition
block["_symmetry_space_group_name_H-M"] = spacegroup[0]
for cell_attr in ["a", "b", "c"]:
block["_cell_length_" + cell_attr] = format_str.format(getattr(latt, cell_attr))
for cell_attr in ["alpha", "beta", "gamma"]:
block["_cell_angle_" + cell_attr] = format_str.format(getattr(latt, cell_attr))
block["_symmetry_Int_Tables_number"] = spacegroup[1]
block["_chemical_formula_structural"] = no_oxi_comp.reduced_formula
block["_chemical_formula_sum"] = no_oxi_comp.formula
block["_cell_volume"] = format_str.format(latt.volume)
reduced_comp, fu = no_oxi_comp.get_reduced_composition_and_factor()
block["_cell_formula_units_Z"] = str(int(fu))
if symprec is None:
block["_symmetry_equiv_pos_site_id"] = ["1"]
block["_symmetry_equiv_pos_as_xyz"] = ["x, y, z"]
else:
sf = SpacegroupAnalyzer(struct, symprec)
symmops = []
for op in sf.get_symmetry_operations():
v = op.translation_vector
symmops.append(SymmOp.from_rotation_and_translation(op.rotation_matrix, v))
ops = [op.as_xyz_string() for op in symmops]
block["_symmetry_equiv_pos_site_id"] = ["%d" % i for i in range(1, len(ops) + 1)]
block["_symmetry_equiv_pos_as_xyz"] = ops
loops.append(["_symmetry_equiv_pos_site_id", "_symmetry_equiv_pos_as_xyz"])
try:
symbol_to_oxinum = OrderedDict([(el.__str__(), float(el.oxi_state)) for el in sorted(comp.elements)])
block["_atom_type_symbol"] = symbol_to_oxinum.keys()
block["_atom_type_oxidation_number"] = symbol_to_oxinum.values()
loops.append(["_atom_type_symbol", "_atom_type_oxidation_number"])
except (TypeError, AttributeError):
symbol_to_oxinum = OrderedDict([(el.symbol, 0) for el in sorted(comp.elements)])
atom_site_type_symbol = []
atom_site_symmetry_multiplicity = []
atom_site_fract_x = []
atom_site_fract_y = []
atom_site_fract_z = []
atom_site_label = []
atom_site_occupancy = []
atom_site_moment_label = []
atom_site_moment_crystalaxis_x = []
atom_site_moment_crystalaxis_y = []
atom_site_moment_crystalaxis_z = []
count = 0
if symprec is None:
for site in struct:
for sp, occu in sorted(site.species.items()):
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("1")
atom_site_fract_x.append(format_str.format(site.a))
atom_site_fract_y.append(format_str.format(site.b))
atom_site_fract_z.append(format_str.format(site.c))
atom_site_label.append(f"{sp.symbol}{count}")
atom_site_occupancy.append(occu.__str__())
magmom = Magmom(site.properties.get("magmom", getattr(sp, "spin", 0)))
if write_magmoms and abs(magmom) > 0:
moment = Magmom.get_moment_relative_to_crystal_axes(magmom, latt)
atom_site_moment_label.append(f"{sp.symbol}{count}")
atom_site_moment_crystalaxis_x.append(format_str.format(moment[0]))
atom_site_moment_crystalaxis_y.append(format_str.format(moment[1]))
atom_site_moment_crystalaxis_z.append(format_str.format(moment[2]))
count += 1
else:
# The following just presents a deterministic ordering.
unique_sites = [
(
sorted(sites, key=lambda s: tuple(abs(x) for x in s.frac_coords))[0],
len(sites),
)
for sites in sf.get_symmetrized_structure().equivalent_sites
]
for site, mult in sorted(
unique_sites,
key=lambda t: (
t[0].species.average_electroneg,
-t[1],
t[0].a,
t[0].b,
t[0].c,
),
):
for sp, occu in site.species.items():
atom_site_type_symbol.append(sp.__str__())
atom_site_symmetry_multiplicity.append("%d" % mult)
atom_site_fract_x.append(format_str.format(site.a))
atom_site_fract_y.append(format_str.format(site.b))
atom_site_fract_z.append(format_str.format(site.c))
atom_site_label.append(f"{sp.symbol}{count}")
atom_site_occupancy.append(occu.__str__())
count += 1
block["_atom_site_type_symbol"] = atom_site_type_symbol
block["_atom_site_label"] = atom_site_label
block["_atom_site_symmetry_multiplicity"] = atom_site_symmetry_multiplicity
block["_atom_site_fract_x"] = atom_site_fract_x
block["_atom_site_fract_y"] = atom_site_fract_y
block["_atom_site_fract_z"] = atom_site_fract_z
block["_atom_site_occupancy"] = atom_site_occupancy
loops.append(
[
"_atom_site_type_symbol",
"_atom_site_label",
"_atom_site_symmetry_multiplicity",
"_atom_site_fract_x",
"_atom_site_fract_y",
"_atom_site_fract_z",
"_atom_site_occupancy",
]
)
if write_magmoms:
block["_atom_site_moment_label"] = atom_site_moment_label
block["_atom_site_moment_crystalaxis_x"] = atom_site_moment_crystalaxis_x
block["_atom_site_moment_crystalaxis_y"] = atom_site_moment_crystalaxis_y
block["_atom_site_moment_crystalaxis_z"] = atom_site_moment_crystalaxis_z
loops.append(
[
"_atom_site_moment_label",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
]
)
d = OrderedDict()
d[comp.reduced_formula] = CifBlock(block, loops, comp.reduced_formula)
self._cf = CifFile(d)
@property
def ciffile(self):
"""
Returns: CifFile associated with the CifWriter.
"""
return self._cf
def __str__(self):
"""
Returns the cif as a string.
"""
return self._cf.__str__()
def write_file(self, filename):
"""
Write the cif file.
"""
with zopen(filename, "wt") as f:
f.write(self.__str__())
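# Illustrative sketch (comment only; `struct` is a hypothetical pymatgen
# Structure and the filename is arbitrary):
#
#     writer = CifWriter(struct, symprec=0.1)
#     cif_text = str(writer)         # CIF as a string
#     writer.write_file("out.cif")   # or write straight to disk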
def str2float(text):
"""
Remove uncertainty brackets from strings and return the float.
"""
try:
# Note that the ending ) is sometimes missing. That is why the code has
# been modified to treat it as optional. Same logic applies to lists.
return float(re.sub(r"\(.+\)*", "", text))
except TypeError:
if isinstance(text, list) and len(text) == 1:
return float(re.sub(r"\(.+\)*", "", text[0]))
except ValueError as ex:
if text.strip() == ".":
return 0
raise ex
raise ValueError(f"{text} cannot be converted to float")
| vorwerkc/pymatgen | pymatgen/io/cif.py | Python | mit | 60,831 | ["Avogadro", "CRYSTAL", "pymatgen"] | 808044bb29c0f2c24a80d1c08fd57b5824d758d490f0e562071adc371116a56b |
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import pickle
import gzip
import os, sys, errno
import time
import math
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
#from frontend.acoustic_normalisation import CMPNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
#from frontend.feature_normalisation_base import FeatureNormBase
from frontend.mean_variance_norm import MeanVarianceNorm
from frontend.mlpg_fast import MLParameterGenerationFast
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
import configuration
from models.dnn import DNN
#from models.ms_dnn import MultiStreamDNN
#from models.ms_dnn_gv import MultiStreamDNNGv
#from models.sdae import StackedDenoiseAutoEncoder
from models.mdn import MixtureDensityNetwork
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
from io_funcs.binary_io import BinaryIOCollection
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import io
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
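# Illustrative sketch (comment only; ids and directory are hypothetical):
#
#     ids = ['arctic_a0001', 'arctic_a0002']
#     lab_list = prepare_file_path_list(ids, '/data/labels', '.lab')
#     # -> ['/data/labels/arctic_a0001.lab', '/data/labels/arctic_a0002.lab']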
def visualize_dnn(dnn):
    # these loggers are module-level in the original script; fetch them
    # explicitly here so the function is self-contained
    logger = logging.getLogger("visualize_dnn")
    plotlogger = logging.getLogger("plotting")
    layer_num = len(dnn.params) // 2 ## including input and output
for i in range(layer_num):
fig_name = 'Activation weights W' + str(i)
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i*2].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def train_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, \
mdn_component, var_floor=0.01, beta_opt=False, eff_sample_size=0.8, mean_log_det=-100.0, \
plot=False, start_from_trained_model='_'):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
        logger.critical('the summation of multi-stream outputs does not equal %d' % (n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
private_l2_reg = float(hyper_params['private_l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
use_rprop = int(hyper_params['use_rprop'])
hidden_layers_sizes = hyper_params['hidden_layer_size']
stream_weights = hyper_params['stream_weights']
private_hidden_sizes = hyper_params['private_hidden_sizes']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
stream_lr_weights = hyper_params['stream_lr_weights']
use_private_hidden = hyper_params['use_private_hidden']
model_type = hyper_params['model_type']
    ## use a switch to turn on pretraining
    ## pretraining may not help much; in that case, turn it off to save time
    do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = True)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list, n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, shuffle = False)
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
train_set_x, train_set_y = shared_train_set_xy
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_next_partition()
valid_set_x, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
    ## temporarily we use the training set as pretrain_set_x;
    ## we still need to support arbitrary data for pretraining
pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
    pretrain_fn = None ## not all models support pretraining right now
train_fn = None
valid_fn = None
    valid_model = None ## valid_fn and valid_model are the same; reserved to compute multi-stream distortion
if model_type == 'DNN':
dnn_model = MixtureDensityNetwork(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
l1_reg = l1_reg, l2_reg = l2_reg,
hidden_layers_sizes = hidden_layers_sizes,
hidden_activation = hidden_activation,
output_activation = output_activation, var_floor = var_floor,
n_component = mdn_component,
use_rprop = use_rprop, rprop_init_update=finetune_lr,
beta_opt=beta_opt, eff_sample_size=eff_sample_size, mean_log_det=mean_log_det)
# dnn_model = DNN(numpy_rng=numpy_rng, n_ins=n_ins, n_outs = n_outs,
# l1_reg = l1_reg, l2_reg = l2_reg,
# hidden_layers_sizes = hidden_layers_sizes,
# hidden_activation = hidden_activation,
# output_activation = output_activation)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y), batch_size=batch_size)
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
    ## We can't just unpickle the old model and use that, because the fine-tune functions
    ## depend on the opt_l2e option used in construction of the initial model. One way around this
    ## would be to unpickle, manually set unpickled_dnn_model.opt_l2e=True and then call
    ## unpickled_dnn_model.build_finetune_functions() again. This is another way: construct a
    ## new model from scratch with opt_l2e=True, then copy the existing weights over:
if start_from_trained_model != '_':
logger.info('load parameters from existing model: %s' %(start_from_trained_model))
if not os.path.isfile(start_from_trained_model):
sys.exit('Model file %s does not exist'%(start_from_trained_model))
existing_dnn_model = pickle.load(open(start_from_trained_model, 'rb'))
if not len(existing_dnn_model.params) == len(dnn_model.params):
sys.exit('Old and new models have different numbers of weight matrices')
for (old_weight, new_weight) in zip(existing_dnn_model.params, dnn_model.params):
old_val = old_weight.get_value()
new_val = new_weight.get_value()
if numpy.shape(old_val) == numpy.shape(new_val):
new_weight.set_value(old_val)
else:
sys.exit('old and new weight matrices have different shapes')
logger.info('fine-tuning the %s model' %(model_type))
    start_time = time.time()  # time.clock() was removed in Python 3.8
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
previous_finetune_lr = finetune_lr
while (epoch < training_epochs): #training_epochs
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
        sub_start_time = time.time()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_next_partition()
train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
            n_train_batches = train_set_x.get_value().shape[0] // batch_size
logger.debug('this partition: %d frames (divided into %d batches of size %d)' %(train_set_x.get_value(borrow=True).shape[0], n_train_batches, batch_size) )
for minibatch_index in range(n_train_batches):
this_train_error = train_fn(minibatch_index, current_finetune_lr, current_momentum)
train_error.append(this_train_error)
if numpy.isnan(this_train_error):
logger.warning('training error over minibatch %d of %d was %s' % (minibatch_index+1,n_train_batches,this_train_error) )
train_data_reader.reset()
logger.debug('calculating validation loss')
validation_losses = valid_fn()
this_validation_loss = numpy.mean(validation_losses)
        # this has a possible bias if the minibatches were not all of identical size,
        # but it should not be significant if minibatches are small
this_train_valid_loss = numpy.mean(train_error)
        sub_end_time = time.time()
loss_difference = this_validation_loss - previous_loss
logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Optimisation progress',xlabel='training epochs',ylabel='objective function')
if this_validation_loss < best_validation_loss:
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
logger.debug('validation loss decreased, so saving model')
early_stop = 0
else:
logger.debug('validation loss did not improve')
early_stop += 1
if early_stop >= early_stop_epoch:
# too many consecutive epochs without surpassing the best model
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
    end_time = time.time()
pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
return best_validation_loss
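# Minimal sketch of the hyper_params dictionary that train_DNN reads (keys
# taken from the lookups above; the values are illustrative placeholders, not
# recommended settings):
#
#     hyper_params = {
#         'learning_rate': 0.002, 'training_epochs': 25, 'batch_size': 256,
#         'l1_reg': 0.0, 'l2_reg': 1e-5, 'private_l2_reg': 1e-5,
#         'warmup_epoch': 10, 'momentum': 0.9, 'warmup_momentum': 0.3,
#         'use_rprop': 0, 'hidden_layer_size': [1024, 1024, 1024],
#         'stream_weights': None, 'private_hidden_sizes': None,
#         'early_stop_epochs': 5, 'hidden_activation': 'tanh',
#         'output_activation': 'linear', 'stream_lr_weights': None,
#         'use_private_hidden': False, 'model_type': 'DNN',
#         'do_pretraining': False, 'pretraining_epochs': 10,
#         'pretraining_lr': 0.0001,
#     }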
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
# visualize_dnn(dbn)
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
        features = features[:(n_ins * (features.size // n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.parameter_prediction(test_set_x=test_set_x)
# predicted_parameter = test_out()
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
### multiple Gaussian components
### NOTE: this second definition shadows the single-component dnn_generation
### above; only this mixture-aware variant is visible at call time.
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, target_mean_vector, target_std_vector, out_dimension_dict, file_extension_dict):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
inf_float = -1.0e+10
plotlogger = logging.getLogger("plotting")
gen_wav_features = ['mgc', 'lf0', 'bap']
stream_start_index = {}
dimension_index = 0
for feature_name in list(out_dimension_dict.keys()):
stream_start_index[feature_name] = dimension_index
dimension_index += out_dimension_dict[feature_name]
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
io_funcs = BinaryIOCollection()
mlpg = MLParameterGenerationFast()
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
        features = features[:(n_ins * (features.size // n_ins))]
features = features.reshape((-1, n_ins))
frame_number = features.shape[0]
test_set_x = theano.shared(numpy.asarray(features, dtype=theano.config.floatX))
mean_matrix = numpy.tile(target_mean_vector, (features.shape[0], 1))
std_matrix = numpy.tile(target_std_vector, (features.shape[0], 1))
predicted_mix = dnn_model.parameter_prediction_mix(test_set_x = test_set_x)
max_index = numpy.argmax(predicted_mix, axis=1)
temp_predicted_mu = dnn_model.parameter_prediction(test_set_x=test_set_x)
temp_predicted_sigma = dnn_model.parameter_prediction_sigma(test_set_x = test_set_x)
predicted_mu = numpy.zeros((temp_predicted_mu.shape[0], n_outs))
predicted_sigma = numpy.zeros((temp_predicted_sigma.shape[0], n_outs))
for kk in range(temp_predicted_mu.shape[0]):
predicted_mu[kk, :] = temp_predicted_mu[kk, max_index[kk]*n_outs:(max_index[kk]+1)*n_outs]
predicted_sigma[kk, :] = temp_predicted_sigma[kk, max_index[kk]*n_outs:(max_index[kk]+1)*n_outs]
# print predicted_mu.shape
# predicted_mu = predicted_mu[aa*n_outs:(aa+1)*n_outs]
predicted_mu = predicted_mu * std_matrix + mean_matrix
predicted_sigma = ((predicted_sigma ** 0.5) * std_matrix ) ** 2
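        # The loop above implements most-likely-component selection for the MDN:
        # per frame, the argmax over the mixture weights picks one Gaussian, and
        # its mean/variance slice is de-normalised. A hedged vectorised
        # equivalent (same shapes assumed; not used here):
        #     rows = numpy.arange(temp_predicted_mu.shape[0])[:, None]
        #     cols = max_index[:, None] * n_outs + numpy.arange(n_outs)[None, :]
        #     predicted_mu = temp_predicted_mu[rows, cols]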
dir_name = os.path.dirname(out_file_list[i])
file_id = os.path.splitext(os.path.basename(out_file_list[i]))[0]
mlpg = MLParameterGenerationFast()
for feature_name in gen_wav_features:
current_features = predicted_mu[:, stream_start_index[feature_name]:stream_start_index[feature_name]+out_dimension_dict[feature_name]]
current_sigma = predicted_sigma[:, stream_start_index[feature_name]:stream_start_index[feature_name]+out_dimension_dict[feature_name]]
            gen_features = mlpg.generation(current_features, current_sigma, out_dimension_dict[feature_name] // 3)
if feature_name == 'lf0':
if 'vuv' in stream_start_index:
vuv_feature = predicted_mu[:, stream_start_index['vuv']:stream_start_index['vuv']+1]
                    for frame_index in range(frame_number):
                        if vuv_feature[frame_index, 0] < 0.5:
                            gen_features[frame_index, 0] = inf_float
# print gen_features
new_file_name = os.path.join(dir_name, file_id + file_extension_dict[feature_name])
io_funcs.array_to_binary_file(gen_features, new_file_name)
## generate bottleneck layer as features
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = pickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in range(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
        features = features[:(n_ins * (features.size // n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
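# Hedged sketch of the binary feature convention used by the generation
# functions above: features are stored as flat float32, then truncated to a
# whole number of frames and reshaped to (frames, dims). `path` and `dims`
# are illustrative names:
#
#     data = numpy.fromfile(path, dtype=numpy.float32)
#     data = data[:dims * (data.size // dims)].reshape((-1, dims))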
def main_function(cfg):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layers_sizes = cfg.hyper_params['hidden_layer_size']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
data_dir = cfg.data_dir
nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(cfg.work_dir, 'gen')
in_file_list_dict = {}
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
    # currently supporting two different forms of linguistic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
# the number can be removed
binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix)
nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)
# nn_label_norm_mvn_dir = os.path.join(data_dir, 'nn_no_silence_lab_norm_'+suffix)
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
# to do - sanity check the label dimension here?
min_max_normaliser = None
label_norm_file = 'label_norm_%s.dat' %(cfg.label_style)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.NORMLAB and (cfg.label_style == 'HTS'):
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
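        # A sketch of the min-max mapping applied above, per feature dimension,
        # with the min/max vectors estimated on the training files only:
        #     x_norm = 0.01 + (x - x_min) * (0.99 - 0.01) / (x_max - x_min)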
if cfg.NORMLAB and (cfg.label_style == 'composed'):
# new flexible label preprocessor
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.items():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
elif label_style == 'hts':
in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in range(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.items():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.values():
fd.close()
# silence removal
if cfg.remove_silence_using_binary_labels:
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
logger.info('Silence will be removed from CMP files in same way')
## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5)
else:
logger.info('No silence removal done')
# start from the labels we have just produced, not trimmed versions
nn_label_file_list = binary_label_file_list
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
    if min_max_normaliser is not None:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
delta_win = [-0.5, 0.0, 0.5]
acc_win = [1.0, -2.0, 1.0]
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
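        # The delta and acceleration windows above define finite-difference
        # dynamic features; a sketch of what is computed per frame t of a
        # static track x (edge handling is an assumption here, not this code):
        #     delta[t] = -0.5 * x[t-1] + 0.5 * x[t+1]
        #     acc[t]   =  1.0 * x[t-1] - 2.0 * x[t] + 1.0 * x[t+1]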
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim, \
binary_label_file_list, lab_dim, silence_feature, percent_to_keep=5)
else: ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
if not os.path.exists(var_dir):
os.makedirs(var_dir)
var_file_dict = {}
for feature_name in list(cfg.out_dimension_dict.keys()):
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
            raise ValueError('Normalisation type %s is not supported' % cfg.output_feature_normalisation)
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
# logger.debug(' value was\n%s' % cmp_norm_info)
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_std_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
# logger.debug(' value was\n%s' % feature_std_vector)
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
# currently, there are two ways to do this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name)
lab_dim = label_normaliser.dimension
elif cfg.label_style == 'composed':
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layers_sizes))
for hid_size in hidden_layers_sizes:
combined_model_arch += '_' + str(hid_size)
# nnets_file_name = '%s/%s_%s_%d.%d.%d.%d.%d.train.%d.model' \
# %(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
# len(hidden_layers_sizes), hidden_layers_sizes[0],
# lab_dim, cfg.cmp_dim, cfg.train_file_number)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.mdn.model' \
%(model_dir, cfg.model_type, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number)
### DNN model training
if cfg.TRAINDNN:
logger.info('training DNN')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
# print 'start DNN'
train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, \
mdn_component=cfg.mdn_component, var_floor=cfg.var_floor, \
plot = cfg.plot, beta_opt=cfg.beta_opt, \
eff_sample_size=cfg.eff_sample_size, mean_log_det=cfg.mean_log_det, \
start_from_trained_model=cfg.start_from_trained_model)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
### generate parameters from DNN
temp_dir_name = '%s_%s_%d_%d_%d_%d_%d_%d' \
%(cfg.model_type, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), hidden_layers_sizes[0])
gen_dir = os.path.join(gen_dir, temp_dir_name)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
target_mean_vector = cmp_min_max[0, ]
target_std_vector = cmp_min_max[1, ]
# dnn_generation(valid_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
# dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, target_mean_vector, target_std_vector, cfg.out_dimension_dict, cfg.file_extension_dict)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list) # reference copy synthesis speech
### evaluation: calculate distortion
if cfg.CALMCD:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if 'mgc' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
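            # The constant (10/ln10)*sqrt(2) converts the per-frame Euclidean
            # distance between mel-cepstra into mel-cepstral distortion in dB:
            #     MCD = (10/ln10) * sqrt( 2 * sum_d (c_d - c'_d)^2 )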
if 'bap' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
if 'lf0' in cfg.in_dimension_dict:
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
valid_f0_mse, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
test_f0_mse , test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_vuv_error*100.))
logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' \
%(test_spectral_distortion , test_bap_mse , test_f0_mse , test_vuv_error*100.))
# this can be removed
#
if 0: #to calculate distortion of HMM baseline
hmm_gen_no_silence_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400_no_silence'
hmm_gen_dir = '/afs/inf.ed.ac.uk/group/project/dnn_tts/data/nick/nick_hmm_pf_2400'
if 1:
hmm_mgc_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.mgc_ext)
hmm_bap_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.bap_ext)
hmm_lf0_list = prepare_file_path_list(gen_file_id_list, hmm_gen_dir, cfg.lf0_ext)
hmm_mgc_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.mgc_ext)
hmm_bap_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.bap_ext)
hmm_lf0_no_silence_list = prepare_file_path_list(gen_file_id_list, hmm_gen_no_silence_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_mgc_list, in_gen_label_align_file_list, hmm_mgc_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_bap_list, in_gen_label_align_file_list, hmm_bap_no_silence_list)
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = ['*-#+*'])
remover.remove_silence(hmm_lf0_list, in_gen_label_align_file_list, hmm_lf0_no_silence_list)
calculator = IndividualDistortionComp()
spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Develop: HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
spectral_distortion = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.mgc_ext, cfg.mgc_dim)
bap_mse = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.bap_ext, cfg.bap_dim)
f0_mse, vuv_error = calculator.compute_distortion(test_file_id_list, ref_data_dir, hmm_gen_no_silence_dir, cfg.lf0_ext, cfg.lf0_dim)
spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)
bap_mse = bap_mse / 10.0
logger.info('Test : HMM -- MCD: %.3f dB; BAP: %.3f dB; F0: %.3f Hz; VUV: %.3f%%' %(spectral_distortion, bap_mse, f0_mse, vuv_error*100.))
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
# set up logging to use our custom class
logging.setLoggerClass(LoggerPlotter)
# get a logger for this main function
logger = logging.getLogger("main")
if len(sys.argv) != 2:
logger.critical('usage: run_dnn.sh [config file name]')
sys.exit(1)
config_file = sys.argv[1]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
if cfg.profile:
logger.info('profiling is activated')
import cProfile, pstats
cProfile.run('main_function(cfg)', 'mainstats')
# create a stream for the profiler to write to
profiling_output = io.StringIO()
p = pstats.Stats('mainstats', stream=profiling_output)
# print stats to that stream
# here we just report the top 10 functions, sorted by total amount of time spent in each
p.strip_dirs().sort_stats('tottime').print_stats(10)
# print the result to the log
logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
profiling_output.close()
logger.info('---End of profiling result---')
else:
main_function(cfg)
sys.exit(0)
|
bajibabu/merlin
|
src/work_in_progress/run_mdn.py
|
Python
|
apache-2.0
| 52,440
|
[
"Gaussian",
"NEURON"
] |
931e4367d0572626937fa97e0f9d7488eadeb39309c89cf63ed3286ea01f62f5
|
########################################################################
# $HeadURL $
# File: RemoveFile.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/25 07:44:19
########################################################################
""" :mod: RemoveFile
================
.. module: RemoveFile
:synopsis: removeFile operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
removeFile operation handler
"""
__RCSID__ = "$Id $"
# #
# @file RemoveFile.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/25 07:44:27
# @brief Definition of RemoveFile class.
# # imports
import os
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
########################################################################
class RemoveFile( DMSRequestOperationsBase ):
"""
.. class:: RemoveFile
remove file operation handler
"""
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param self: self reference
:param Operation operation: Operation to execute
:param str csPath: CS path for this handler
"""
# # call base class ctor
DMSRequestOperationsBase.__init__( self, operation, csPath )
    # # gMonitor stuff goes here
gMonitor.registerActivity( "RemoveFileAtt", "File removals attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RemoveFileOK", "Successful file removals",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RemoveFileFail", "Failed file removals",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # re pattern for not existing files
self.reNotExisting = re.compile( r"(no|not) such file.*", re.IGNORECASE )
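    # A quick check of the pattern (illustrative): messages such as
    # "No such file or directory" match, so the file is treated as already gone:
    #   assert self.reNotExisting.search( "No such file or directory" )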
def __call__( self ):
""" action for 'removeFile' operation """
# # get waiting files
waitingFiles = self.getWaitingFilesList()
fc = FileCatalog( self.operation.catalogList )
res = fc.getReplicas( [wf.LFN for wf in waitingFiles] )
if not res['OK']:
gMonitor.addMark( "RemoveFileAtt" )
gMonitor.addMark( "RemoveFileFail" )
return res
# We check the status of the SE from the LFN that are successful
# No idea what to do with the others...
replicas = res['Value']['Successful']
targetSEs = set( [se for lfn in replicas for se in replicas[lfn] ] )
if targetSEs:
      # Check whether the SEs allow removal, but don't fail the operation yet if some SEs are permanently banned
bannedTargets = self.checkSEsRSS( targetSEs, access = 'RemoveAccess', failIfBanned = False )
if not bannedTargets['OK']:
gMonitor.addMark( "RemoveFileAtt" )
gMonitor.addMark( "RemoveFileFail" )
return bannedTargets
bannedTargets = set( bannedTargets['Value'] )
else:
bannedTargets = set()
# # prepare waiting file dict
    # # keep only files that have no replica at any banned SE (files with no replica information are kept too)
toRemoveDict = dict( ( opFile.LFN, opFile ) for opFile in waitingFiles \
if not bannedTargets or not bannedTargets.intersection( replicas.get( opFile.LFN, [] ) ) )
# If some SEs are always banned, set Failed the files that cannot be removed
if bannedTargets and 'always banned' in self.operation.Error:
for opFile in waitingFiles:
if opFile.LFN not in toRemoveDict:
# Set the files that cannot be removed Failed
opFile.Error = self.operation.Error
opFile.Status = "Failed"
if not toRemoveDict:
# If there are no files that can be removed, exit, else try once to remove them anyway
return S_OK( "%s targets are always banned for removal" % ",".join( sorted( bannedTargets ) ) )
if toRemoveDict:
gMonitor.addMark( "RemoveFileAtt", len( toRemoveDict ) )
# # 1st step - bulk removal
self.log.debug( "bulk removal of %s files" % len( toRemoveDict ) )
bulkRemoval = self.bulkRemoval( toRemoveDict )
if not bulkRemoval["OK"]:
self.log.error( "Bulk file removal failed", bulkRemoval["Message"] )
else:
gMonitor.addMark( "RemoveFileOK", len( toRemoveDict ) - len( bulkRemoval["Value"] ) )
toRemoveDict = bulkRemoval["Value"]
# # 2nd step - single file removal
for lfn, opFile in toRemoveDict.iteritems():
self.log.info( "removing single file %s" % lfn )
singleRemoval = self.singleRemoval( opFile )
if not singleRemoval["OK"]:
self.log.error( 'Error removing single file', singleRemoval["Message"] )
gMonitor.addMark( "RemoveFileFail", 1 )
else:
self.log.info( "file %s has been removed" % lfn )
gMonitor.addMark( "RemoveFileOK", 1 )
# # set
failedFiles = [ ( lfn, opFile ) for ( lfn, opFile ) in toRemoveDict.iteritems()
if opFile.Status in ( "Failed", "Waiting" ) ]
if failedFiles:
self.operation.Error = "failed to remove %d files" % len( failedFiles )
if bannedTargets:
return S_OK( "%s targets are banned for removal" % ",".join( sorted( bannedTargets ) ) )
return S_OK()
def bulkRemoval( self, toRemoveDict ):
""" bulk removal using request owner DN
:param dict toRemoveDict: { lfn: opFile, ... }
:return: S_ERROR or S_OK( { lfn: opFile, ... } ) -- dict with files still waiting to be removed
"""
bulkRemoval = self.dm.removeFile( toRemoveDict.keys(), force = True )
if not bulkRemoval["OK"]:
error = bulkRemoval["Message"]
self.log.error( "Bulk file removal failed", error )
self.operation.Error = error
for opFile in self.operation:
opFile.Error = error
return bulkRemoval
bulkRemoval = bulkRemoval["Value"]
# # filter results
for lfn, opFile in toRemoveDict.iteritems():
if lfn in bulkRemoval["Successful"]:
opFile.Status = "Done"
elif lfn in bulkRemoval["Failed"]:
error = bulkRemoval["Failed"][lfn]
if type( error ) == dict:
error = ";".join( [ "%s-%s" % ( k, v ) for k, v in error.iteritems() ] )
opFile.Error = error
if self.reNotExisting.search( opFile.Error ):
opFile.Status = "Done"
# # return files still waiting
toRemoveDict = dict( ( lfn, opFile ) for lfn, opFile in toRemoveDict.iteritems() if opFile.Status == "Waiting" )
return S_OK( toRemoveDict )
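  # dm.removeFile follows the usual DIRAC bulk-result convention; a hedged
  # sketch of the S_OK value unpacked above (shape from the DIRAC API, values
  # illustrative):
  #   { 'Successful': { lfn: True, ... },
  #     'Failed'    : { lfn: error_message_or_dict, ... } }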
def singleRemoval( self, opFile ):
""" remove single file
:param opFile: File instance
"""
# # try to remove with owner proxy
proxyFile = None
if "Write access not permitted for this credential" in opFile.Error:
if "DataManager" in self.shifter:
# # you're a data manager - get proxy for LFN and retry
saveProxy = os.environ["X509_USER_PROXY"]
try:
fileProxy = self.getProxyForLFN( opFile.LFN )
if not fileProxy["OK"]:
opFile.Error = "Error getting owner's proxy : %s" % fileProxy['Message']
else:
proxyFile = fileProxy["Value"]
self.log.info( "Trying to remove file with owner's proxy (file %s)" % proxyFile )
removeFile = self.dm.removeFile( opFile.LFN, force = True )
self.log.always( str( removeFile ) )
if not removeFile["OK"]:
opFile.Error = str( removeFile["Message"] )
if self.reNotExisting.search( str( removeFile["Message"] ).lower() ):
opFile.Status = "Done"
else:
removeFile = removeFile["Value"]
if opFile.LFN in removeFile["Failed"]:
error = removeFile["Failed"][opFile.LFN]
if type( error ) == dict:
error = ";".join( [ "%s-%s" % ( k, v ) for k, v in error.iteritems() ] )
if self.reNotExisting.search( error ):
# This should never happen due to the "force" flag
opFile.Status = "Done"
else:
opFile.Error = error
else:
opFile.Status = "Done"
finally:
if proxyFile:
os.unlink( proxyFile )
# # put back request owner proxy to env
os.environ["X509_USER_PROXY"] = saveProxy
# # file removed? update its status to 'Done'
if opFile.Status == "Done":
return S_OK()
return S_ERROR( opFile.Error )
|
andresailer/DIRAC
|
DataManagementSystem/Agent/RequestOperations/RemoveFile.py
|
Python
|
gpl-3.0
| 8,695
|
[
"DIRAC"
] |
db26bdacb31b262a8f45f28369329b573b224a0d105c3f87a1a44f0f40b07519
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
import unittest
from MooseDocs.testing import MarkdownTestCase
class TestMisc(MarkdownTestCase):
"""
    Test that the misc extension is working.
"""
EXTENSIONS = ['MooseDocs.extensions.misc']
def testScroll(self):
md = 'Some before content.\n\n## One\nContent\n##Two\n\nMore Content'
self.assertConvert('testScroll.html', md)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
yipenggao/moose
|
python/MooseDocs/tests/misc/test_misc.py
|
Python
|
lgpl-2.1
| 1,803
|
[
"MOOSE"
] |
dcd9c0affe9d869cd76026adc01b4dd855435d86f317b3de962c7e844bf28d0e
|
import numpy as np
import datetime as dt
import matplotlib
import matplotlib.pyplot as plt
import os
#import smooth_2d as s2d
from scipy import signal
from scipy import misc
import warnings #To suppress certain warnings.
from mpl_toolkits.basemap import Basemap
#Some module constant section.
default_figure_size=(10,7.5)
def data_diff(data_pp,data_pn):
data_diff=dict()
for key in data_pp:
data_diff[key]=data_pp[key]-data_pn[key]
return data_diff
def norm_bv( data_p , data_n , norm_type='None' , smooth='None' , sigma='None',cutoff='None' , xi='None' , xe='None' , yi='None' , ye='None', zi='None', ze='None'):
#Computes the bv in a similar way as the breeding fortran module does.
#Sigma is the standard deviation of the gaussian filter in units of grid points.
#Smooth is a filter option. Currently gaussian filter is supported.
if norm_type == 'None' : #UVT, UV or T
norm_type = 'UVT'
if smooth == 'None' : #None, Gaussian or Lanczos
filter_norm=False
else :
filter_norm=True
if sigma == 'None' or sigma==0 :
sigma = 1
print('Warning : Sigma = 1 ')
if cutoff == 'None' : #Convolution function size for the Gaussian filter
cutoff=10
tmp=np.array( data_p['U'] )
tmp_shape = np.shape( tmp )
nx=tmp_shape[0]
ny=tmp_shape[1]
nz=tmp_shape[2]
norm=np.zeros((nx,ny,nz))
if norm_type == 'UVT' :
norm= norm + np.power( data_p['U'] - data_n['U'] , 2. )
norm= norm + np.power( data_p['V'] - data_n['V'] , 2. )
norm= norm + np.power( data_p['T'] - data_n['T'] , 2. )
if norm_type == 'UV' :
norm= norm + np.power( data_p['U'] - data_n['U'] , 2. )
norm= norm + np.power( data_p['V'] - data_n['V'] , 2. )
if norm_type == 'W' :
norm= norm + np.power( data_p['W'] - data_n['W'] , 2. )
if norm_type == 'T' :
norm= norm + np.power( data_p['T'] - data_n['T'] , 2. )
if norm_type == 'QV' :
norm= norm + np.power( data_p['QV'] - data_n['QV'] , 2. )
if norm_type == 'QHYD' :
norm= norm + np.power( data_p['QHYD'] - data_n['QHYD'] , 2. )
if filter_norm :
if smooth == 'Gaussian' :
filter_size=np.around( 2*cutoff*sigma , 0 ).astype(int)
gaussian_conv_func=np.zeros([ 2*filter_size+1 , 2*filter_size+1 ])
for ii in range(0,2*filter_size+1) :
for jj in range(0,2*filter_size+1) :
gaussian_conv_func[ii,jj] = np.exp( -0.5*(np.power( ii-filter_size,2 ) + np.power( jj-filter_size, 2) ) /np.power(sigma,2) )
#Normalize the convolving function.
gaussian_conv_func=gaussian_conv_func/np.sum( gaussian_conv_func)
#a=plt.figure
#plt.pcolor( gaussian_conv_func )
#plt.show(a)
for iz in range(0,nz) :
norm[:,:,iz]=signal.convolve2d(norm[:,:,iz],gaussian_conv_func, boundary='symm', mode='same')
#elif smooth == 'Lanczos' :
# for iz in range(0,nz) :
# mask=np.ones((nx,ny))
# norm[:,:,iz]=s2d.filter_2d(inputvar2d=norm[:,:,iz],mask=mask,lambdaf=sigma,ctyp=smooth,nx=nx,ny=ny)
else : #TODO add lanczos 2D filter option.
print('Smooth option not recognized, we will not smooth the norm')
norm=np.power(norm,0.5)
norm_mean , norm_max , norm_min = get_regional_average(norm,xi=xi,xe=xe,yi=yi,ye=ye,zi=zi,ze=ze)
return norm_mean , norm_max , norm_min , norm #Generate a tuple as output.
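# In norm_bv above, the bred-vector norm per grid point is
#     norm = sqrt( sum over selected variables of (x_p - x_n)^2 )
# i.e. the Euclidean distance between the positive and negative perturbations
# in the chosen variable space, optionally smoothed before taking the root.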
def norm_bv_2( data_p , data_n , norm_type='None' , smooth='None' , sigma='None',cutoff='None' ):
#Computes the bv in a similar way as the breeding fortran module does.
#Sigma is the standard deviation of the gaussian filter in units of grid points.
#Smooth is a filter option. Currently gaussian filter is supported.
if norm_type == 'None' : #UVT, UV or T
norm_type = 'UVT'
tmp=np.array( data_p['U'] )
tmp_shape = np.shape( tmp )
nx=tmp_shape[0]
ny=tmp_shape[1]
nz=tmp_shape[2]
norm=np.zeros((nx,ny,nz))
if norm_type == 'UVT' :
norm= norm + np.power( data_p['U'] - data_n['U'] , 2. )
norm= norm + np.power( data_p['V'] - data_n['V'] , 2. )
norm= norm + np.power( data_p['T'] - data_n['T'] , 2. )
if norm_type == 'UV' :
norm= norm + np.power( data_p['U'] - data_n['U'] , 2. )
norm= norm + np.power( data_p['V'] - data_n['V'] , 2. )
if norm_type == 'W' :
norm= norm + np.power( data_p['W'] - data_n['W'] , 2. )
if norm_type == 'T' :
norm= norm + np.power( data_p['T'] - data_n['T'] , 2. )
if norm_type == 'QV' :
norm= norm + np.power( data_p['QV'] - data_n['QV'] , 2. )
if norm_type == 'QHYD' :
norm= norm + np.power( data_p['QHYD'] - data_n['QHYD'] , 2. )
norm=np.power( norm , 0.5 )
return norm #Generate a tuple as output.
def filter_field( data , smooth='None' , sigma='None', cutoff='None' ):
if smooth == 'None' : #None, Gaussian or Lanczos
filter_norm=False
else :
filter_norm=True
if sigma == 'None' or sigma==0 :
sigma = 1
print('Warning : Sigma = 1 ')
if cutoff == 'None' : #Convolution function size for the Gaussian filter
cutoff=10
if( np.size( np.shape( data ) ) >= 3 ) :
nz=data.shape[2]
else :
nz=1
data_s=np.zeros( np.shape(data) )
if smooth == 'Gaussian' :
filter_size=np.round( 2*cutoff*sigma , 0 )
gaussian_conv_func=np.zeros(( 2*filter_size.astype(int) +1 , 2*filter_size.astype(int) +1 ))
for ii in range(0,2*filter_size.astype(int) + 1 ) :
for jj in range(0,2*filter_size.astype(int)+1) :
gaussian_conv_func[ii,jj] = np.exp( -0.5*(np.power( ii-filter_size.astype(int),2 ) + np.power( jj-filter_size.astype(int), 2) ) /np.power(sigma,2) )
#Normalize the convolving function.
gaussian_conv_func=gaussian_conv_func/np.sum( gaussian_conv_func )
if ( nz == 1 ) :
data_s[:,:]=signal.convolve2d(data[:,:],gaussian_conv_func, boundary='symm', mode='same')
else :
for iz in range(0,nz) :
data_s[:,:,iz]=signal.convolve2d(data[:,:,iz],gaussian_conv_func, boundary='symm', mode='same')
#elif smooth == 'Lanczos' :
# for iz in range(0,nz) :
# mask=np.ones((nx,ny))
# norm[:,:,iz]=s2d.filter_2d(inputvar2d=norm[:,:,iz],mask=mask,lambdaf=sigma,ctyp=smooth,nx=nx,ny=ny)
else : #TODO add lanczos 2D filter option.
print('Smooth option not recognized, we will not smooth the norm')
return data_s #Generate a tuple as output.
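# A hedged, loop-free alternative for building the same normalised Gaussian
# kernel used in filter_field above (assuming an integer `filter_size` and the
# same `sigma`; not used by this module):
#     ax = np.arange(-filter_size, filter_size + 1)
#     xx, yy = np.meshgrid(ax, ax)
#     kernel = np.exp(-0.5 * (xx**2 + yy**2) / sigma**2)
#     kernel /= kernel.sum()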
def lat_lon_to_i_j(lonfield,latfield,lonlist,latlist) :
#Gets the i,j which is closer to a particular lat and lon give a latfield, lonfield.
npoints=latlist.size
i=np.zeros(latlist.shape)
j=np.zeros(latlist.shape)
for ipoint in range(0,npoints) :
dist=np.power( latfield - latlist[ipoint] , 2.0 ) + np.power( lonfield - lonlist[ipoint] , 2.0 )
#Get the indices of the minimum
i[ipoint] , j[ipoint] = np.unravel_index(dist.argmin(), dist.shape)
return i , j
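# Usage sketch (values illustrative): nearest grid indices for one point,
# given 2-D lon/lat fields describing the model grid:
#     i, j = lat_lon_to_i_j(lon2d, lat2d, np.array([300.5]), np.array([-34.6]))
# Note the distance is computed in squared degrees, so this is only a
# nearest-neighbour approximation, not a great-circle search.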
def growth_rate_bv( norm_o , norm_r , xi='None' , xe='None' , yi='None' , ye='None', zi='None', ze='None') :
nx = np.shape( norm_o )[0]
ny = np.shape( norm_o )[1]
nz = np.shape( norm_o )[2]
    nregs=xi.size #Number of defined regions.
growth_rate=norm_o[:,:,:] - norm_r[:,:,:] #3d growing rate field.
growth_rate_mean=np.zeros(nregs)
growth_rate_max=np.zeros(nregs)
growth_rate_min=np.zeros(nregs)
growth_rate_mean , growth_rate_max , growth_rate_min = get_regional_average(growth_rate,xi=xi,xe=xe,yi=yi,ye=ye,zi=zi,ze=ze)
return growth_rate_mean , growth_rate_max , growth_rate_min , growth_rate
def get_regional_average( var , xi='None' , xe='None' , yi='None' , ye='None', zi='None', ze='None') :
#Given a 3D variable and a set of regions, get the mean, max and min of the variable over the grid points
#contained in these regions.
#Region is defined as a 3D rectangular section of the grid (in grid space).
nx = np.shape( var )[0]
ny = np.shape( var )[1]
if ( np.size ( np.shape ( var ) ) >= 3 ) :
nz = np.shape( var )[2]
else :
nz = 1
if xi == 'None' :
xi=np.ndarray(1)
xi[:]=0
if xe == 'None' :
xe=np.ndarray(1)
xe[:]=(nx - 1)
if yi == 'None' :
yi=np.ndarray(1)
yi[:]=0
if ye == 'None' :
ye=np.ndarray(1)
ye[:]=ny-1
if zi == 'None' :
zi=np.ndarray(xi.shape)
zi[:]=0
if ze == 'None' :
ze=np.ndarray(xi.shape)
ze[:]=(nz-1)
nregs=xi.size #Get number of regions
var_mean=np.zeros(nregs)
var_max=np.zeros(nregs)
var_min=np.zeros(nregs)
for ireg in range(0,nregs) :
if ( nz > 1 ) :
tmp=var[xi[ireg]:xe[ireg]+1,yi[ireg]:ye[ireg]+1,zi[ireg]:ze[ireg]+1] #Get subregion for var.
else :
tmp=var[xi[ireg]:xe[ireg]+1,yi[ireg]:ye[ireg]+1]
        #np.nan* functions produce warnings when the input consists only of NaN values. This warning is ignored.
with warnings.catch_warnings() :
warnings.simplefilter("ignore", category=RuntimeWarning)
var_mean[ireg]=np.nanmean( tmp )
var_max[ireg]=np.nanmax( tmp )
var_min[ireg]=np.nanmin( tmp )
return var_mean , var_max , var_min
def plot_bv(bv,data,lon,lat,plotvars,plotlevels,plotbasedir,mycolorbar='seismic',range='centered',figextension='None',fontsize='None',offset='None',figsize='None',debug=False,ndatalevels='None',date='None'):
#Example of optional inputs with default values.
if offset == 'None' :
offset=0
#End of example
#Create output directory
if not os.path.exists(plotbasedir):
os.makedirs(plotbasedir)
tmp=np.shape(lon)
nx=tmp[0]
ny=tmp[1]
for key in bv :
for var in plotvars :
if var == key : #We will plot a figure.
plotvar=bv[key][offset:nx-offset,offset:ny-offset,:]
plotlon=lon[offset:nx-offset,offset:ny-offset]
plotlat=lat[offset:nx-offset,offset:ny-offset]
plotvarname='bv_' + key
plot_var_levels(plotvar,plotlon,plotlat,plotlevels,plotbasedir,plotvarname,mycolorbar=mycolorbar,range=range,figextension='None',fontsize='None',offset='None',figsize='None',debug=False,date=date)
def plot_state(state,lon,lat,plotvars,plotlevels,plotbasedir,mycolorbar='coolwarm',range='maxmin',figextension='None',fontsize='None',offset='None',figsize='None',debug=False,ndatalevels='None',date='None'):
#Example of optional inputs with default values.
if offset == 'None' :
offset=0
#End of example
#Create output directory
if not os.path.exists(plotbasedir):
os.makedirs(plotbasedir)
tmp=np.shape(lon)
nx=tmp[0]
ny=tmp[1]
for key in state :
for var in plotvars :
if var == key : #We will plot a figure.
plotvar=state[key][offset:nx-offset,offset:ny-offset,:]
plotlon=lon[offset:nx-offset,offset:ny-offset]
plotlat=lat[offset:nx-offset,offset:ny-offset]
plotvarname='state_' + key
plot_var_levels(plotvar,plotlon,plotlat,plotlevels,plotbasedir,plotvarname,varcontour='None',mycolorbar=mycolorbar,range=range,figextension='None',fontsize='None',offset='None',figsize='None',debug=False,date=date)
def plot_var_levels(var,lon,lat,plotlevels,plotbasedir,varname,varcontour='None',clevels='None',ndatalevels='None',range='centered',scale_max='None',scale_min='None',figextension='None',fontsize='None',offset='None',figsize='None',debug=False,date='None',mycolorbar='seismic') :
#Example of optional inputs with default values.
if date=='None' :
got_date=False
date=''
else :
got_date=True
if ndatalevels=='None' :
ndatalevels=10
if figextension == 'None' :
figextension='png'
if fontsize == 'None' :
fontsize=20
if offset == 'None' :
offset=0
if figsize == 'None' :
figsize=default_figure_size
if varcontour== 'None' :
plot_contour=False
else :
plot_contour=True
#If we have a contour var, then define the clevels for plotting it.
if clevels == 'None' :
if ( np.size( varcontour.shape ) >= 3 ) :
data_range_max=np.max(abs(np.mean(varcontour,2)))
else :
data_range_max=np.max(abs( varcontour ) )
data_range_min=-data_range_max
delta_data=( data_range_max - data_range_min ) / 5
#clevels=np.zeros( ndatalevels )
clevels=np.arange( data_range_min , data_range_max , delta_data )
#Create output directory
if not os.path.exists(plotbasedir):
os.makedirs(plotbasedir)
#Set font size
matplotlib.rcParams.update({'font.size': fontsize})
tmp=np.shape(lon)
nx=tmp[0]
ny=tmp[1]
true_lon=np.mean(lon) #For projection.
true_lat=np.mean(lat) #For projection.
    #Get upper right and lower left lats and lons for projection.
ll_lat=lat[0,0]
ur_lat=lat[nx-1,ny-1]
ll_lon=lon[0,0]
ur_lon=lon[nx-1,ny-1]
for level in plotlevels :
fig=plt.figure(1,figsize=figsize)
#Basemap plotting section (maybe this can be sent to an independent function)
#Set the map projection
m = Basemap(projection='stere',lon_0=true_lon,lat_0=90.0,lat_ts=true_lat,\
llcrnrlat=ll_lat,urcrnrlat=ur_lat,llcrnrlon=ll_lon,urcrnrlon=ur_lon, \
rsphere=6371200.,resolution='h',area_thresh=10000)
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# draw parallels.
parallels = np.arange(-90.0,90,0.5)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
# draw meridians
meridians = np.arange(0.,360.,0.5)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
if ( np.size( np.shape(var) ) >= 3 ) :
tmpplot=var[offset:nx-offset,offset:ny-offset,level]
else :
tmpplot=var[offset:nx-offset,offset:ny-offset]
tmplon=lon[offset:nx-offset,offset:ny-offset]
tmplat=lat[offset:nx-offset,offset:ny-offset]
lonplot , latplot = m(tmplon,tmplat)
if range == 'centered' or range == 'None' :
var_range_max=np.nanmax(abs(tmpplot))
var_range_min=-var_range_max
elif range == 'maxmin' :
var_range_max=np.nanmax(tmpplot)
var_range_min=np.nanmin(tmpplot)
elif range == 'fixed' :
if scale_max == 'None' :
var_range_max=np.nanmax(tmpplot)
var_range_min=np.nanmin(tmpplot)
else :
var_range_max=scale_max
var_range_min=scale_min
elif range == 'positive' :
var_range_min=0
var_range_max=np.nanmax(tmpplot)
tmpplot = np.ma.masked_invalid(tmpplot)
m.pcolor(lonplot,latplot,tmpplot,cmap=plt.cm.get_cmap(mycolorbar),vmax=var_range_max,vmin=var_range_min)
m.colorbar()
if plot_contour :
if( np.size( np.shape( varcontour ) ) >= 3 ) :
tmpplot=np.nanmean(varcontour[offset:nx-offset,offset:ny-offset,:],2)
else :
tmpplot=varcontour[offset:nx-offset,offset:ny-offset]
matplotlib.rcParams['contour.negative_linestyle'] = 'solid' #Forces negative lines to be solid too.
m.contour(lonplot,latplot,tmpplot,levels=clevels,colors='k',linewidths=2,inline=1,fmt='%1.1f',fontsize=12)
my_title=varname + ' at level ' + str(level)
plt.ylabel('Lat')
plt.xlabel('Lon')
if got_date :
my_title=my_title + ' (' + date + ')'
plt.title(my_title)
            print( 'Generating the following figure : ' + 'Figure_' + varname + '_' + date + '_' + str(level) + '.' + figextension )
plt.savefig( plotbasedir + '/Figure_' + varname + '_' + date + '_' + str(level) + '.' + figextension )
else :
plt.title(my_title)
            print( 'Generating the following figure : ' + 'Figure_' + varname + '_' + str(level) + '.' + figextension )
plt.savefig( plotbasedir + '/Figure_' + varname + '_' + str(level) + '.' + figextension )
if debug :
plt.show(fig)
plt.close(fig)
else :
plt.close(fig)
def plot_var_ave(var,lon,lat,plotbasedir,varname,range='centered',varcontour='None',clevels='None',ndatalevels='None',levels='None',figextension='None',fontsize='None',offset='None',figsize='None',debug=False,date='None',mycolorbar='seismic') :
#Example of optional inputs with default values.
tmp=np.shape(var)
nx=tmp[0]
ny=tmp[1]
nz=tmp[2]
if date=='None' :
got_date=False
date=''
else :
got_date=True
if ndatalevels=='None' :
ndatalevels=10
if figextension == 'None' :
figextension='png'
if fontsize == 'None' :
fontsize=20
if offset == 'None' :
offset=0
if figsize == 'None' :
figsize=default_figure_size
if levels == 'None' :
levels=np.arange(0,nz)
if varcontour == 'None' :
plot_contour=False
else :
plot_contour=True
if clevels == 'None' :
data_range_max=np.nanmax(abs(np.mean(varcontour,2)))
data_range_min=-data_range_max
        delta_data=( data_range_max - data_range_min ) / ndatalevels
clevels=np.zeros( ndatalevels )
clevels=np.arange( data_range_min , data_range_max , delta_data )
#End of example
#Create output directory
if not os.path.exists(plotbasedir):
os.makedirs(plotbasedir)
#Set font size
matplotlib.rcParams.update({'font.size': fontsize})
true_lon=np.mean(lon) #For projection.
true_lat=np.mean(lat) #For projection.
    #Get upper right and lower left lats and lons for projection.
ll_lat=lat[0,0]
ur_lat=lat[nx-1,ny-1]
ll_lon=lon[0,0]
ur_lon=lon[nx-1,ny-1]
#Plot averaged norm.
fig=plt.figure(1,figsize=figsize)
#Basemap plotting section (maybe this can be sent to an independent function)
#Set the map projection
m = Basemap(projection='stere',lon_0=true_lon,lat_0=90.0,lat_ts=true_lat,\
llcrnrlat=ll_lat,urcrnrlat=ur_lat,llcrnrlon=ll_lon,urcrnrlon=ur_lon, \
rsphere=6371200.,resolution='h',area_thresh=10000)
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# draw parallels.
parallels = np.arange(-90.0,90,0.5)
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
# draw meridians
meridians = np.arange(0.,360.,0.5)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
#Plot vertical average of the variable.
tmpplot=np.nanmean(var[offset:nx-offset,offset:ny-offset,:],2)
tmplon=lon[offset:nx-offset,offset:ny-offset]
tmplat=lat[offset:nx-offset,offset:ny-offset]
lonplot , latplot = m(tmplon,tmplat)
if range == 'centered' or range == 'None' :
var_range_max=np.nanmax(abs(tmpplot))
var_range_min=-var_range_max
elif range == 'maxmin' :
var_range_max=np.nanmax(tmpplot)
var_range_min=np.nanmin(tmpplot)
tmpplot = np.ma.masked_invalid(tmpplot)
m.pcolor(lonplot,latplot,tmpplot,cmap=plt.cm.get_cmap(mycolorbar),vmax=var_range_max,vmin=var_range_min)
m.colorbar()
if plot_contour :
tmpplot=np.nansum(varcontour[offset:nx-offset,offset:ny-offset,:],2)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid' #Forces negative lines to be solid too.
m.contour(lonplot,latplot,tmpplot,levels=clevels,colors='k',linewidths=2,inline=1,fmt='%1.1f',fontsize=12)
my_title= varname + ' vertical average '
if got_date :
my_title=my_title + ' (' + date + ')'
plt.title( my_title )
#plt.grid(True)
plt.ylabel('Lat')
plt.xlabel('Lon')
    print( 'Generating the following figure : ' + 'Figure_' + varname + '_' + date + '_avez.' + figextension )
plt.savefig( plotbasedir + 'Figure_' + varname + '_' + date + '_avez.' + figextension )
if debug :
plt.show(fig)
else :
plt.close(fig)
#X-Z -average plot
fig=plt.figure(1,figsize=figsize)
tmpplot=np.nanmean(var[offset:nx-offset,offset:ny-offset,:],0)
lonplot=np.nanmean(lon[offset:nx-offset,offset:ny-offset],0)
if range == 'centered' or range == 'None' :
var_range_max=np.nanmax(abs(tmpplot))
var_range_min=-var_range_max
elif range == 'maxmin' :
var_range_max=np.nanmax(tmpplot)
var_range_min=np.nanmin(tmpplot)
levplot , lonplot = np.meshgrid( levels , lonplot )
tmpplot = np.ma.masked_invalid(tmpplot)
plt.pcolor(lonplot,levplot,tmpplot,cmap=plt.cm.get_cmap(mycolorbar),vmax=var_range_max,vmin=var_range_min)
plt.colorbar()
my_title= varname + ' Y average '
if got_date :
my_title=my_title + ' (' + date + ')'
plt.title( my_title )
plt.ylim(np.min(levplot),np.max(levplot))
plt.xlim(np.min(lonplot),np.max(lonplot))
plt.grid(True)
plt.ylabel('Height')
plt.xlabel('Lon')
if plot_contour :
tmpplot=np.nanmean(varcontour[offset:nx-offset,offset:ny-offset,:],0)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid' #Forces negative lines to be solid too.
plt.contour(lonplot,levplot,tmpplot,levels=clevels,colors='k',linewidths=2,inline=1,fmt='%1.1f',fontsize=12)
    print( 'Generating the following figure : ' + 'Figure_' + varname + '_' + date + '_avey.' + figextension )
plt.savefig( plotbasedir + 'Figure_' + varname + '_' + date + '_avey.' + figextension )
if debug :
plt.show(fig)
else :
plt.close(fig)
#Y-Z -average plot
fig=plt.figure(1,figsize=figsize)
tmpplot=np.nanmean(var[offset:nx-offset,offset:ny-offset,:],1)
latplot=np.nanmean(lat[offset:nx-offset,offset:ny-offset],1)
levplot , latplot = np.meshgrid( levels , latplot )
if range == 'centered' or range == 'None' :
var_range_max=np.nanmax(abs(tmpplot))
var_range_min=-var_range_max
elif range == 'maxmin' :
var_range_max=np.nanmax(tmpplot)
var_range_min=np.nanmin(tmpplot)
tmpplot = np.ma.masked_invalid(tmpplot)
plt.pcolor(latplot,levplot,tmpplot,cmap=plt.cm.get_cmap(mycolorbar),vmax=var_range_max,vmin=var_range_min)
plt.colorbar()
my_title= varname + ' X average at '
if got_date :
my_title=my_title + ' (' + date + ')'
plt.title( my_title )
plt.ylim(np.min(levplot),np.max(levplot))
plt.xlim(np.min(latplot),np.max(latplot))
plt.grid(True)
plt.ylabel('Height')
plt.xlabel('Lat')
if plot_contour :
tmpplot=np.nanmean(varcontour[offset:nx-offset,offset:ny-offset,:],1)
matplotlib.rcParams['contour.negative_linestyle'] = 'solid' #Forces negative lines to be solid too.
plt.contour(latplot,levplot,tmpplot,levels=clevels,colors='k',linewidths=2,inline=1,fmt='%1.1f',fontsize=12)
print( 'Generating the following figure : ' + 'Figure_' + varname + '_' + date + '_avex.' + figextension )
plt.savefig( plotbasedir + 'Figure_' + varname + '_' + date + '_avex.' + figextension )
if debug :
plt.show(fig)
else :
plt.close(fig)
def plot_norm_timeseries(norm_o,norm_i,norm_r,var_name,reg_name,plotbasedir,ibv,figprefix='None',figsize='None',figextension='None',debug=False) :
#Norm_o is the norm of the perturbation evolved up to time t.
#Norm_i is the norm at t-1 from which the evolved perturbation started.
#Norm_r is the norm of the rescaled perturbation at t.
#Because of the rescaling (breeding) step, the BV series is not continuous in time.
if reg_name == 'None' :
plot_reg_name = False
if figsize == 'None' :
figsize=default_figure_size
if figextension == 'None' :
figextension='.png'
#Create output directory
if not os.path.exists(plotbasedir):
os.makedirs(plotbasedir)
#Creates plots of bv norm
tmp_shape=norm_o[var_name[0]].shape
ntimes=tmp_shape[0]
nbv=tmp_shape[1]
nregs=tmp_shape[2]
#Create time series.
time_serie=np.zeros([ntimes*4,nregs])
times=np.zeros(ntimes*4)
for myvar in var_name :
for ii in range(1,ntimes) :
#Build a sawtooth-style series to represent the evolution of the norm.
time_serie[4*ii,:]=norm_i[myvar][ii-1,ibv,:]
time_serie[4*ii+1,:]=norm_o[myvar][ii,ibv,:]
time_serie[4*ii+2,:]=norm_r[myvar][ii,ibv,:]
time_serie[4*ii+3,:]=np.nan
times[4*ii]=ii-1
times[4*ii+1]=ii
times[4*ii+2]=ii
times[4*ii+3]=ii
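#Layout of the sawtooth series (descriptive note): for each step ii, slots
#4*ii..4*ii+3 hold [norm_i at t-1, norm_o at t, norm_r at t, NaN]; the NaN
#breaks the plotted line between consecutive breeding cycles.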
for ireg in range(0,nregs) :
iregstr="%04d" % ( ireg + 1 )
fig=plt.figure(1,figsize=figsize)
#for it in range(0,niter) :
plt.plot(times,time_serie[:,ireg],'-')
plt.ylabel('Norm')
plt.xlabel('Time')
if debug :
plt.show()
print( 'Generating the following figure : ' + 'Figure_' + figprefix + myvar + 'reg' + iregstr + figextension )
plt.savefig( plotbasedir + 'Figure_' + figprefix + myvar + 'reg' + iregstr + figextension )
plt.close(fig)
#plt.plot(times,meandisttrack[:,0],'r-',linewidth=3,label='CTRL')
#ax.fill_between(times, meandisttrack[:,0]+errorbar[:,0],meandisttrack[:,0]-errorbar[:,0], facecolor='red', alpha=0.1)
#plt.plot(times,meandisttrack[:,1],'b-',linewidth=3,label='PE')
#ax.fill_between(times, meandisttrack[:,1]+errorbar[:,1],meandisttrack[:,1]-errorbar[:,1], facecolor='blue', alpha=0.1)
#plt.legend(fontsize=14,loc='upper right')
|
gustfrontar/LETKF_WRF
|
common_python/scale_modules/bred_vector_functions.py
|
Python
|
gpl-3.0
| 26,147
|
[
"Gaussian"
] |
6035b2de52b5177b6848cc8d1fe7648a085dae0b1b867693e68aa111ae8d5cb7
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008-2010, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from ctypes import *
from pysamba.twisted.reactor import reactor, eventContext
from pysamba.library import *
from pysamba.wbem.wbem import *
from pysamba.wbem.Query import Query
from pysamba.twisted.callback import WMIFailure
import sys
import os  # printSize() below uses os.getpid()
import logging
log = logging.getLogger('p.t.watcher')
import Globals
from Products.ZenUtils.Driver import drive
from twisted.internet import defer
wql = """SELECT * FROM __InstanceCreationEvent where """\
"""TargetInstance ISA 'Win32_NTLogEvent' """\
"""and TargetInstance.EventType <= 5"""
def async_sleep(secs):
d = defer.Deferred()
reactor.callLater(secs, d.callback, None)
return d
def printSize():
"Monitor this process' memory usage"
import gc
gc.collect()
sz = open('/proc/%d/statm' % os.getpid()).read().split()[0]
log.info('*'*40)
log.info("Current size: %s" % (sz,) )
def doOneDevice(creds, hostname):
def inner(driver):
try:
q = Query()
yield q.connect(eventContext, hostname, creds)
driver.next()
yield q.notificationQuery(wql)
result = driver.next()
log.info("Query sent")
while 1:
printSize()
try:
class_name = ''
while 1:
yield result.fetchSome(500)
if not driver.next(): break
log.info("Got %d items", len(driver.next()))
for obj in driver.next():
obj = obj.targetinstance
props = [p for p in obj.__dict__.keys()
if not p.startswith('_')]
if obj._class_name != class_name:
class_name = obj._class_name
print obj._class_name
print repr(props)
print repr([getattr(obj, p) for p in props])
except WError, ex:
log.exception(ex)
yield async_sleep(1)
driver.next()
q.close()
except Exception, ex:
log.exception(ex)
return drive(inner)
def main():
logging.basicConfig()
log = logging.getLogger()
log.setLevel(20)
# DEBUGLEVEL.value = 99
creds = sys.argv[1]
hosts = sys.argv[2:]
def stop(result):
print result
reactor.stop()
def later():
d = defer.DeferredList(
[doOneDevice(creds, h) for h in hosts]
)
d.addBoth(stop)
reactor.callLater(1, later)
reactor.run()
sys.exit(main())
|
NetNow/wmi-samba
|
pysamba/test/watcher.py
|
Python
|
gpl-2.0
| 3,267
|
[
"VisIt"
] |
921f34958df220e8ec26658c952046e0fb430625549c54b6086775975db69205
|
#!/usr/bin/env python
"""on_off_problem.py
This example compares Z values from various hypothesis tests performed
when incorporating a systematic uncertainty into a test of the
background-only hypothesis for a Poisson process.
The on/off problem, the different statistical tests and the input/output values
are described in http://arxiv.org/abs/physics/0702156
"""
import math
import matplotlib.pyplot as plt
import numpy as np
import statspy as sp
import statspy.hypotest
# Experimental data stored as (n_on, n_off, tau)
exps = [[4, 5, 5.],
[6, 18.78, 14.44],
[9, 17.83, 4.69],
[17, 40.11, 10.56],
[50, 55, 2.],
[67, 15, 0.5],
[200, 10, 0.1],
[523, 2327, 5.99],
[498426, 493434, 1.],
#[211949, 23650096, 11.21], # numerical instabilities with this exp
]
# Define the different parameters of the problem
# Parameter of interest:
mu = sp.Param(name='mu', value=0., poi=True) # Signal strength
s = sp.Param(name='s', value=3., const=True) # Expected number of signal evts
# Nuisance parameter:
b = sp.Param('b=1.') # Expected number of bkg evts in the signal region
# Transfer factor between the control and signal regions (constant)
tau = sp.Param(name='tau', value=5., const=True)
# With normal bkg shape, standard deviation on b
sigmab = sp.Param('sigmab=0.447', const=True)
# Derived quantities
mu_on = mu * s + b # Total events expectation in the signal region
mu_off = tau*b # Total events expectation in the control region
mu_on.name = 'mu_on' # Parameters must be named to be recalled later via str
mu_off.name = 'mu_off'
rho = 1./(1. + tau)
rho.name = 'rho'
# Define the probability mass function corresponding to n_on
pmf_on = sp.PF('pmf_on=poisson(n_on;mu_on)')
# Define the probability mass function of n_off as a Poisson
pmf_off = sp.PF('pmf_off=poisson(n_off;mu_off)')
likelihood_P = pmf_on * pmf_off
# Approximate the distribution of b as a Gaussian of width sigmab
pdf_off = sp.PF('pdf_off=norm(x;b,sigmab)')
likelihood_G = pmf_on * pdf_off
# The joint distribution likelihood_P can be rewritten as the product of a
# poisson distribution on n_tot = n_on + n_off and a binomial distribution
# with parameter rho. The binomial part is used to compute a p-value in a
# Frequentist approach.
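# Quick sketch of why rho = 1/(1+tau): under the background-only hypothesis
# (mu = 0) the expectations are E[n_on] = b and E[n_off] = tau*b, so
# conditioning on n_tot = n_on + n_off gives
#   n_on | n_tot ~ Binomial(n_tot, p)  with  p = b/(b + tau*b) = 1/(1+tau) = rho,
# which is exactly the binomial PF defined just below.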
n_tot = sp.Param('n_tot=9.', const=True)
pmf_ratio = sp.PF('pmf_ratio=binom(n_on;n_tot,rho)')
# Loop over experiments
for exp in exps:
# Initialize RAW parameters for that experiment
(n_on, n_off, tau0) = exp
mu.value = 0. # Test a background only null hypothesis
tau.value = tau0
b.value = n_off / tau0
s.value = n_on - b.value
sigmab.value = math.sqrt(b.value) / math.sqrt(tau0)
# Compute Z-value in case of no uncertainty on b
Z_P = statspy.hypotest.pvalue_to_Zvalue(pmf_on.pvalue(n_on))
exp.append(Z_P)
#
# Frequentist solution
#
n_tot.value = n_on + round(n_off)
Z_Bi = statspy.hypotest.pvalue_to_Zvalue(pmf_ratio.pvalue(n_on))
exp.append(Z_Bi)
#
# Profile likelihood methods
#
data_P = (n_on, round(n_off))
res_pllr_P = statspy.hypotest.likelihood_ratio(likelihood_P, data_P)
exp.append(res_pllr_P.Zvalue)
data_G = (n_on, n_off/tau0)
res_pllr_G = statspy.hypotest.likelihood_ratio(likelihood_G, data_G)
exp.append(res_pllr_G.Zvalue)
for idx,var in enumerate(['n_on','n_off','tau','Z_P','Z_Bi','Z_PLP','Z_PLG']):
the_str = var
for exp in exps:
the_str = the_str + '\t%5.2f' % exp[idx]
print the_str
|
bruneli/statspy
|
examples/on_off_problem.py
|
Python
|
bsd-3-clause
| 3,533
|
[
"Gaussian"
] |
f75eb87d0ddd25d62ccff8f0d1870ff235638392e2f9951f9840f2047126c214
|
"""
One electron integrals.
"""
from numpy import pi,exp,floor,array,isclose
from math import factorial
from pyquante2.utils import binomial, fact2, Fgamma, norm2
# Notes:
# The versions S,T,V include the normalization constants
# The version overlap,kinetic,nuclear_attraction do not.
# This is so, for example, the kinetic routines can call the potential routines
# without the normalization constants getting in the way.
def S(a,b):
"""
Simple interface to the overlap function.
>>> from pyquante2 import pgbf,cgbf
>>> s = pgbf(1)
>>> isclose(S(s,s),1.0)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(S(sc,sc),1.0)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(S(sc,s),1.0)
True
>>> isclose(S(s,sc),1.0)
True
"""
if b.contracted:
return sum(cb*S(pb,a) for (cb,pb) in b)
elif a.contracted:
return sum(ca*S(b,pa) for (ca,pa) in a)
return a.norm*b.norm*overlap(a.exponent,a.powers,
a.origin,b.exponent,b.powers,b.origin)
def T(a,b):
"""
Simple interface to the kinetic function.
>>> from pyquante2 import pgbf,cgbf
>>> from pyquante2.basis.pgbf import pgbf
>>> s = pgbf(1)
>>> isclose(T(s,s),1.5)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(T(sc,sc),1.5)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(T(sc,s),1.5)
True
>>> isclose(T(s,sc),1.5)
True
"""
if b.contracted:
return sum(cb*T(pb,a) for (cb,pb) in b)
elif a.contracted:
return sum(ca*T(b,pa) for (ca,pa) in a)
return a.norm*b.norm*kinetic(a.exponent,a.powers,a.origin,
b.exponent,b.powers,b.origin)
def V(a,b,C):
"""
Simple interface to the nuclear attraction function.
>>> from pyquante2 import pgbf,cgbf
>>> s = pgbf(1)
>>> isclose(V(s,s,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,sc,(0,0,0)),-1.595769)
True
>>> sc = cgbf(exps=[1],coefs=[1])
>>> isclose(V(sc,s,(0,0,0)),-1.595769)
True
>>> isclose(V(s,sc,(0,0,0)),-1.595769)
True
"""
if b.contracted:
return sum(cb*V(pb,a,C) for (cb,pb) in b)
elif a.contracted:
return sum(ca*V(b,pa,C) for (ca,pa) in a)
return a.norm*b.norm*nuclear_attraction(a.exponent,a.powers,a.origin,
b.exponent,b.powers,b.origin,C)
def overlap(alpha1,lmn1,A,alpha2,lmn2,B):
"""
Full form of the overlap integral. Taken from THO eq. 2.12
>>> isclose(overlap(1,(0,0,0),array((0,0,0),'d'),1,(0,0,0),array((0,0,0),'d')),1.968701)
True
"""
l1,m1,n1 = lmn1
l2,m2,n2 = lmn2
rab2 = norm2(A-B)
gamma = alpha1+alpha2
P = gaussian_product_center(alpha1,A,alpha2,B)
pre = pow(pi/gamma,1.5)*exp(-alpha1*alpha2*rab2/gamma)
wx = overlap1d(l1,l2,P[0]-A[0],P[0]-B[0],gamma)
wy = overlap1d(m1,m2,P[1]-A[1],P[1]-B[1],gamma)
wz = overlap1d(n1,n2,P[2]-A[2],P[2]-B[2],gamma)
return pre*wx*wy*wz
def overlap1d(l1,l2,PAx,PBx,gamma):
"""
The one-dimensional component of the overlap integral. Taken from THO eq. 2.12
>>> isclose(overlap1d(0,0,0,0,1),1.0)
True
"""
total = 0
for i in range(1+int(floor(0.5*(l1+l2)))):
total += binomial_prefactor(2*i,l1,l2,PAx,PBx)* \
fact2(2*i-1)/pow(2*gamma,i)
return total
def gaussian_product_center(alpha1,A,alpha2,B):
"""
The center of the Gaussian resulting from the product of two Gaussians:
>>> gaussian_product_center(1,array((0,0,0),'d'),1,array((0,0,0),'d'))
array([ 0., 0., 0.])
"""
return (alpha1*A+alpha2*B)/(alpha1+alpha2)
def binomial_prefactor(s,ia,ib,xpa,xpb):
"""
The integral prefactor containing the binomial coefficients from Augspurger and Dykstra.
>>> binomial_prefactor(0,0,0,0,0)
1
"""
total= 0
for t in range(s+1):
if s-ia <= t <= ib:
total += binomial(ia,s-t)*binomial(ib,t)* \
pow(xpa,ia-s+t)*pow(xpb,ib-t)
return total
def kinetic(alpha1,lmn1,A,alpha2,lmn2,B):
"""
The full form of the kinetic energy integral
>>> isclose(kinetic(1,(0,0,0),array((0,0,0),'d'),1,(0,0,0),array((0,0,0),'d')),2.953052)
True
"""
l1,m1,n1 = lmn1
l2,m2,n2 = lmn2
term0 = alpha2*(2*(l2+m2+n2)+3)*\
overlap(alpha1,(l1,m1,n1),A,\
alpha2,(l2,m2,n2),B)
term1 = -2*pow(alpha2,2)*\
(overlap(alpha1,(l1,m1,n1),A,
alpha2,(l2+2,m2,n2),B)
+ overlap(alpha1,(l1,m1,n1),A,
alpha2,(l2,m2+2,n2),B)
+ overlap(alpha1,(l1,m1,n1),A,
alpha2,(l2,m2,n2+2),B))
term2 = -0.5*(l2*(l2-1)*overlap(alpha1,(l1,m1,n1),A,
alpha2,(l2-2,m2,n2),B) +
m2*(m2-1)*overlap(alpha1,(l1,m1,n1),A,
alpha2,(l2,m2-2,n2),B) +
n2*(n2-1)*overlap(alpha1,(l1,m1,n1),A,
alpha2,(l2,m2,n2-2),B))
return term0+term1+term2
def nuclear_attraction(alpha1,lmn1,A,alpha2,lmn2,B,C):
"""
Full form of the nuclear attraction integral
>>> isclose(nuclear_attraction(1,(0,0,0),array((0,0,0),'d'),1,(0,0,0),array((0,0,0),'d'),array((0,0,0),'d')),-3.141593)
True
"""
l1,m1,n1 = lmn1
l2,m2,n2 = lmn2
gamma = alpha1+alpha2
P = gaussian_product_center(alpha1,A,alpha2,B)
rab2 = norm2(A-B)
rcp2 = norm2(C-P)
dPA = P-A
dPB = P-B
dPC = P-C
Ax = A_array(l1,l2,dPA[0],dPB[0],dPC[0],gamma)
Ay = A_array(m1,m2,dPA[1],dPB[1],dPC[1],gamma)
Az = A_array(n1,n2,dPA[2],dPB[2],dPC[2],gamma)
total = 0.
for I in range(l1+l2+1):
for J in range(m1+m2+1):
for K in range(n1+n2+1):
total += Ax[I]*Ay[J]*Az[K]*Fgamma(I+J+K,rcp2*gamma)
val= -2*pi/gamma*exp(-alpha1*alpha2*rab2/gamma)*total
return val
def A_term(i,r,u,l1,l2,PAx,PBx,CPx,gamma):
"""
THO eq. 2.18
>>> A_term(0,0,0,0,0,0,0,0,1)
1.0
>>> A_term(0,0,0,0,1,1,1,1,1)
1.0
>>> A_term(1,0,0,0,1,1,1,1,1)
-1.0
>>> A_term(0,0,0,1,1,1,1,1,1)
1.0
>>> A_term(1,0,0,1,1,1,1,1,1)
-2.0
>>> A_term(2,0,0,1,1,1,1,1,1)
1.0
>>> A_term(2,0,1,1,1,1,1,1,1)
-0.5
>>> A_term(2,1,0,1,1,1,1,1,1)
0.5
"""
return pow(-1,i)*binomial_prefactor(i,l1,l2,PAx,PBx)*\
pow(-1,u)*factorial(i)*pow(CPx,i-2*r-2*u)*\
pow(0.25/gamma,r+u)/factorial(r)/factorial(u)/factorial(i-2*r-2*u)
def A_array(l1,l2,PA,PB,CP,g):
"""
THO eq. 2.18 and 3.1
>>> A_array(0,0,0,0,0,1)
[1.0]
>>> A_array(0,1,1,1,1,1)
[1.0, -1.0]
>>> A_array(1,1,1,1,1,1)
[1.5, -2.5, 1.0]
"""
Imax = l1+l2+1
A = [0]*Imax
for i in range(Imax):
for r in range(int(floor(i/2)+1)):
for u in range(int(floor((i-2*r)/2)+1)):
I = i-2*r-u
A[I] = A[I] + A_term(i,r,u,l1,l2,PA,PB,CP,g)
return A
if __name__ == '__main__':
import doctest; doctest.testmod()
|
berquist/pyquante2
|
pyquante2/ints/one.py
|
Python
|
bsd-3-clause
| 7,247
|
[
"Gaussian"
] |
78ceb423b8f66c4a4feb86ff3489d286f04aa496447faec823410c6d0efcd84c
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import pytest # type: ignore
from _pytest.monkeypatch import MonkeyPatch # type: ignore
import os
import tempfile
import hashlib
from zipfile import ZipFile
from monty.json import MontyDecoder
from pymatgen import SETTINGS
from pymatgen.io.vasp.sets import *
from pymatgen.io.vasp.inputs import Poscar, Kpoints
from pymatgen.core import Specie, Lattice, Structure
from pymatgen.core.surface import SlabGenerator
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.vasp.outputs import Vasprun
MODULE_DIR = Path(__file__).resolve().parent
dec = MontyDecoder()
class SetChangeCheckTest(PymatgenTest):
def test_sets_changed(self):
# WARNING!
# These tests will fail when you change an input set.
# They are included as a sanity check: if you want to change
# an input set, please make sure to notify the users for that set.
# For sets starting with "MVL" this is @shyuep, for sets starting
# with "MP" this is @shyuep and @mkhorton.
os.chdir(MODULE_DIR / "..")
input_sets = glob.glob("*.yaml")
hashes = {}
for input_set in input_sets:
with open(input_set, "r") as f:
hashes[input_set] = hashlib.sha1(f.read().encode("utf-8")).hexdigest()
known_hashes = {'MVLGWSet.yaml': 'f4df9516cf7dd923b37281172c662a70fa32bebc',
'MVLRelax52Set.yaml': 'eb538ffb45c0cd13f13df48afc1e71c44d2e34b2',
'MPHSERelaxSet.yaml': '2bb969e64b57ff049077c8ec10e64f94c9c97f42',
'VASPIncarBase.yaml': 'dbdbfe7d5c055a3f1e87223a031ae3ad58631395',
'MPSCANRelaxSet.yaml': 'd582e2e6dc55e1931c7616bacaf703326f3f1110',
'MPRelaxSet.yaml': '6e981500f8b8b3c33b6bee3c279a3b824cbafe3d',
'MITRelaxSet.yaml': '1a0970f8cad9417ec810f7ab349dc854eaa67010',
'vdW_parameters.yaml': '66541f58b221c8966109156f4f651b2ca8aa76da'}
# assert hashes == known_hashes
if hashes != known_hashes:
raise UserWarning(
'These tests will fail when you change an input set. \
They are included as a sanity check: if you want to change \
an input set, please make sure to notify the users for that set. \
For sets starting with "MVL" this is @shyuep, for sets starting \
with "MP" this is @shyuep and @mkhorton.'
)
class MITMPRelaxSetTest(PymatgenTest):
@classmethod
def setUpClass(cls):
cls.monkeypatch = MonkeyPatch()
filepath = cls.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(filepath)
cls.structure = poscar.structure
cls.coords = [[0, 0, 0], [0.75, 0.5, 0.75]]
cls.lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
cls.mitset = MITRelaxSet(cls.structure)
cls.mitset_unsorted = MITRelaxSet(cls.structure, sort_structure=False)
cls.mpset = MPRelaxSet(cls.structure)
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_metal_check(self):
structure = Structure.from_spacegroup(
"Fm-3m", Lattice.cubic(3), ["Cu"], [[0, 0, 0]]
)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
vis = MITRelaxSet(structure)
incar = vis.incar
# Verify some things
self.assertIn("ISMEAR", str(w[-1].message))
def test_poscar(self):
structure = Structure(self.lattice, ["Fe", "Mn"], self.coords)
mitparamset = MITRelaxSet(structure, sort_structure=False)
s_unsorted = mitparamset.poscar.structure
mitparamset = MITRelaxSet(structure, sort_structure=True)
s_sorted = mitparamset.poscar.structure
self.assertEqual(s_unsorted[0].specie.symbol, "Fe")
self.assertEqual(s_sorted[0].specie.symbol, "Mn")
def test_potcar_symbols(self):
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.75, 0.25, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
structure = Structure(lattice, ["P", "Fe", "O"], coords)
mitparamset = MITRelaxSet(structure)
syms = mitparamset.potcar_symbols
self.assertEqual(syms, ["Fe", "P", "O"])
paramset = MPRelaxSet(structure, sort_structure=False)
syms = paramset.potcar_symbols
self.assertEqual(syms, ["P", "Fe_pv", "O"])
def test_potcar_validation(self):
structure = Structure(self.lattice, ["P", "Fe"], self.coords)
# Use pytest's monkeypatch to temporarily point pymatgen to a directory
# containing the wrong POTCARs (LDA potcars in a PBE directory)
with self.monkeypatch.context() as m:
m.setitem(SETTINGS, "PMG_VASP_PSP_DIR", str(self.TEST_FILES_DIR / "wrong_potcars"))
with pytest.warns(BadInputSetWarning, match="not known by pymatgen"):
MITRelaxSet(structure).potcar
def test_lda_potcar(self):
structure = Structure(self.lattice, ["P", "Fe"], self.coords)
p = MITRelaxSet(structure, potcar_functional="LDA").potcar
self.assertEqual(p.functional, "LDA")
def test_nelect(self):
coords = [[0] * 3, [0.5] * 3, [0.75] * 3]
lattice = Lattice.cubic(4)
s = Structure(lattice, ["Si", "Si", "Fe"], coords)
self.assertAlmostEqual(MITRelaxSet(s).nelect, 16)
# Check that it works even when oxidation states are present. Was a bug
# previously.
s = Structure(lattice, ["Si4+", "Si4+", "Fe2+"], coords)
self.assertAlmostEqual(MITRelaxSet(s).nelect, 16)
self.assertAlmostEqual(MPRelaxSet(s).nelect, 22)
# Check that it works for disordered structure. Was a bug previously
s = Structure(lattice, ["Si4+", "Fe2+", "Si4+"], coords)
self.assertAlmostEqual(MITRelaxSet(s).nelect, 16)
self.assertAlmostEqual(MPRelaxSet(s).nelect, 22)
def test_get_incar(self):
incar = self.mpset.incar
self.assertEqual(incar["LDAUU"], [5.3, 0, 0])
self.assertAlmostEqual(incar["EDIFF"], 0.0012)
incar = self.mitset.incar
self.assertEqual(incar["LDAUU"], [4.0, 0, 0])
self.assertAlmostEqual(incar["EDIFF"], 1e-5)
si = 14
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
# Silicon structure for testing.
latt = Lattice(
np.array(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
)
struct = Structure(latt, [si, si], coords)
incar = MPRelaxSet(struct).incar
self.assertNotIn("LDAU", incar)
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
lattice = Lattice(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
struct = Structure(lattice, ["Fe", "Mn"], coords)
incar = MPRelaxSet(struct).incar
self.assertNotIn("LDAU", incar)
# check fluorides
struct = Structure(lattice, ["Fe", "F"], coords)
incar = MPRelaxSet(struct).incar
self.assertEqual(incar["LDAUU"], [5.3, 0])
self.assertEqual(incar["MAGMOM"], [5, 0.6])
struct = Structure(lattice, ["Fe", "F"], coords)
incar = MITRelaxSet(struct).incar
self.assertEqual(incar["LDAUU"], [4.0, 0])
# Make sure this works with species.
struct = Structure(lattice, ["Fe2+", "O2-"], coords)
incar = MPRelaxSet(struct).incar
self.assertEqual(incar["LDAUU"], [5.3, 0])
struct = Structure(
lattice, ["Fe", "Mn"], coords, site_properties={"magmom": (5.2, -4.5)}
)
incar = MPRelaxSet(struct).incar
self.assertEqual(incar["MAGMOM"], [-4.5, 5.2])
incar = MITRelaxSet(struct, sort_structure=False).incar
self.assertEqual(incar["MAGMOM"], [5.2, -4.5])
struct = Structure(lattice, [Specie("Fe", 2, {"spin": 4.1}), "Mn"], coords)
incar = MPRelaxSet(struct).incar
self.assertEqual(incar["MAGMOM"], [5, 4.1])
struct = Structure(lattice, ["Mn3+", "Mn4+"], coords)
incar = MITRelaxSet(struct).incar
self.assertEqual(incar["MAGMOM"], [4, 3])
userset = MPRelaxSet(
struct, user_incar_settings={"MAGMOM": {"Fe": 10, "S": -5, "Mn3+": 100}}
)
self.assertEqual(userset.incar["MAGMOM"], [100, 0.6])
noencutset = MPRelaxSet(struct, user_incar_settings={"ENCUT": None})
self.assertNotIn("ENCUT", noencutset.incar)
# sulfide vs sulfate test
coords = list()
coords.append([0, 0, 0])
coords.append([0.75, 0.5, 0.75])
coords.append([0.25, 0.5, 0])
struct = Structure(lattice, ["Fe", "Fe", "S"], coords)
incar = MITRelaxSet(struct).incar
self.assertEqual(incar["LDAUU"], [1.9, 0])
# Make sure Materials Project sulfides are ok.
self.assertNotIn("LDAUU", MPRelaxSet(struct).incar)
struct = Structure(lattice, ["Fe", "S", "O"], coords)
incar = MITRelaxSet(struct).incar
self.assertEqual(incar["LDAUU"], [4.0, 0, 0])
# Make sure Materials Project sulfates are ok.
self.assertEqual(MPRelaxSet(struct).incar["LDAUU"], [5.3, 0, 0])
# test for default LDAUU value
userset_ldauu_fallback = MPRelaxSet(
struct, user_incar_settings={"LDAUU": {"Fe": 5.0, "S": 0}}
)
self.assertEqual(userset_ldauu_fallback.incar["LDAUU"], [5.0, 0, 0])
# Expected to be oxide (O is the most electronegative atom)
s = Structure(lattice, ["Fe", "O", "S"], coords)
incar = MITRelaxSet(s).incar
self.assertEqual(incar["LDAUU"], [4.0, 0, 0])
# Expected to be chloride (Cl is the most electronegative atom)
s = Structure(lattice, ["Fe", "Cl", "S"], coords)
incar = MITRelaxSet(s, user_incar_settings={"LDAU": True}).incar
self.assertFalse("LDAUU" in incar) # LDAU = False
# The user can mark a compound as a sulfide by specifying values of "LDAUL" etc.
s = Structure(lattice, ["Fe", "Cl", "S"], coords)
incar = MITRelaxSet(
s,
user_incar_settings={
"LDAU": True,
"LDAUL": {"Fe": 3},
"LDAUU": {"Fe": 1.8},
},
).incar
self.assertEqual(incar["LDAUL"], [3.0, 0, 0])
self.assertEqual(incar["LDAUU"], [1.8, 0, 0])
# test that van-der-Waals parameters are parsed correctly
incar = MITRelaxSet(struct, vdw="optB86b").incar
self.assertEqual(incar["GGA"], "Mk")
self.assertEqual(incar["LUSE_VDW"], True)
self.assertEqual(incar["PARAM1"], 0.1234)
# Test that NELECT is updated when a charge is present
si = 14
coords = list()
coords.append(np.array([0, 0, 0]))
coords.append(np.array([0.75, 0.5, 0.75]))
# Silicon structure for testing.
latt = Lattice(
np.array(
[
[3.8401979337, 0.00, 0.00],
[1.9200989668, 3.3257101909, 0.00],
[0.00, -2.2171384943, 3.1355090603],
]
)
)
struct = Structure(latt, [si, si], coords, charge=1)
mpr = MPRelaxSet(struct, use_structure_charge=True)
self.assertEqual(
mpr.incar["NELECT"], 7, "NELECT not properly set for nonzero charge"
)
# test that NELECT does not get set when use_structure_charge = False
mpr = MPRelaxSet(struct, use_structure_charge=False)
self.assertFalse(
"NELECT" in mpr.incar.keys(),
"NELECT should not be set when " "use_structure_charge is False",
)
def test_get_kpoints(self):
kpoints = MPRelaxSet(self.structure).kpoints
self.assertEqual(kpoints.kpts, [[2, 4, 5]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
kpoints = MPRelaxSet(
self.structure, user_kpoints_settings={"reciprocal_density": 1000}
).kpoints
self.assertEqual(kpoints.kpts, [[6, 10, 13]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
kpoints_obj = Kpoints(kpts=[[3, 3, 3]])
kpoints_return = MPRelaxSet(
self.structure, user_kpoints_settings=kpoints_obj
).kpoints
self.assertEqual(kpoints_return.kpts, [[3, 3, 3]])
kpoints = self.mitset.kpoints
self.assertEqual(kpoints.kpts, [[25]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)
recip_paramset = MPRelaxSet(self.structure, force_gamma=True)
recip_paramset.kpoints_settings = {"reciprocal_density": 40}
kpoints = recip_paramset.kpoints
self.assertEqual(kpoints.kpts, [[2, 4, 5]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_get_vasp_input(self):
d = self.mitset.get_vasp_input()
self.assertEqual(d["INCAR"]["ISMEAR"], -5)
s = self.structure.copy()
s.make_supercell(4)
paramset = MPRelaxSet(s)
d = paramset.get_vasp_input()
self.assertEqual(d["INCAR"]["ISMEAR"], 0)
def test_MPMetalRelaxSet(self):
mpmetalset = MPMetalRelaxSet(self.get_structure("Sn"))
incar = mpmetalset.incar
self.assertEqual(incar["ISMEAR"], 1)
self.assertEqual(incar["SIGMA"], 0.2)
kpoints = mpmetalset.kpoints
self.assertArrayAlmostEqual(kpoints.kpts[0], [5, 5, 5])
def test_as_from_dict(self):
mitset = MITRelaxSet(self.structure)
mpset = MPRelaxSet(self.structure)
mpuserset = MPRelaxSet(
self.structure,
user_incar_settings={"MAGMOM": {"Fe": 10, "S": -5, "Mn3+": 100}},
)
d = mitset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v._config_dict["INCAR"]["LDAUU"]["O"]["Fe"], 4)
d = mpset.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v._config_dict["INCAR"]["LDAUU"]["O"]["Fe"], 5.3)
d = mpuserset.as_dict()
v = dec.process_decoded(d)
# self.assertEqual(type(v), MPVaspInputSet)
self.assertEqual(
v.user_incar_settings["MAGMOM"], {"Fe": 10, "S": -5, "Mn3+": 100}
)
def test_hubbard_off_and_ediff_override(self):
p = MPRelaxSet(
self.structure, user_incar_settings={"LDAU": False, "EDIFF": 1e-10}
)
self.assertNotIn("LDAUU", p.incar)
self.assertEqual(p.incar["EDIFF"], 1e-10)
def test_write_input(self):
self.mitset.write_input(".", make_dir_if_not_present=True)
for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR"]:
self.assertTrue(os.path.exists(f))
self.assertFalse(os.path.exists("Fe4P4O16.cif"))
self.mitset.write_input(".", make_dir_if_not_present=True, include_cif=True)
self.assertTrue(os.path.exists("Fe4P4O16.cif"))
for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR", "Fe4P4O16.cif"]:
os.remove(f)
self.mitset.write_input(".", make_dir_if_not_present=True, potcar_spec=True)
for f in ["INCAR", "KPOINTS", "POSCAR"]:
self.assertTrue(os.path.exists(f))
self.assertFalse(os.path.exists("POTCAR"))
self.assertTrue(os.path.exists("POTCAR.spec"))
for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR.spec"]:
os.remove(f)
def test_user_potcar_settings(self):
vis = MPRelaxSet(self.structure, user_potcar_settings={"Fe": "Fe"})
potcar = vis.potcar
self.assertEqual(potcar.symbols, ["Fe", "P", "O"])
class MPStaticSetTest(PymatgenTest):
def setUp(self):
self.tmp = tempfile.mkdtemp()
warnings.simplefilter("ignore")
def test_init(self):
prev_run = self.TEST_FILES_DIR / "relaxation"
vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["NSW"], 0)
# Check that the ENCUT has been inherited.
self.assertEqual(vis.incar["ENCUT"], 600)
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst)
# Check as from dict.
vis = MPStaticSet.from_dict(vis.as_dict())
self.assertEqual(vis.incar["NSW"], 0)
# Check that the ENCUT has been inherited.
self.assertEqual(vis.incar["ENCUT"], 600)
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst)
non_prev_vis = MPStaticSet(
vis.structure, user_incar_settings={"LORBIT": 12, "LWAVE": True}
)
self.assertEqual(non_prev_vis.incar["NSW"], 0)
# Check that the ENCUT and Kpoints style has NOT been inherited.
self.assertEqual(non_prev_vis.incar["ENCUT"], 520)
# Check that user incar settings are applied.
self.assertEqual(non_prev_vis.incar["LORBIT"], 12)
self.assertTrue(non_prev_vis.incar["LWAVE"])
self.assertEqual(non_prev_vis.kpoints.style, Kpoints.supported_modes.Gamma)
v2 = MPStaticSet.from_dict(non_prev_vis.as_dict())
self.assertEqual(v2.incar["ENCUT"], 520)
# Check that user incar settings are applied.
self.assertEqual(v2.incar["LORBIT"], 12)
leps_vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run, lepsilon=True)
self.assertTrue(leps_vis.incar["LEPSILON"])
self.assertEqual(leps_vis.incar["IBRION"], 8)
self.assertNotIn("NPAR", leps_vis.incar)
self.assertNotIn("NSW", leps_vis.incar)
self.assertEqual(non_prev_vis.kpoints.kpts, [[11, 10, 10]])
non_prev_vis = MPStaticSet(vis.structure, reciprocal_density=200)
self.assertEqual(non_prev_vis.kpoints.kpts, [[14, 12, 12]])
# Check LCALCPOL flag
lcalcpol_vis = MPStaticSet.from_prev_calc(prev_calc_dir=prev_run, lcalcpol=True)
self.assertTrue(lcalcpol_vis.incar["LCALCPOL"])
def test_user_incar_kspacing(self):
# Make sure user KSPACING settings properly overrides KPOINTS.
si = self.get_structure("Si")
vis = MPRelaxSet(si, user_incar_settings={"KSPACING": 0.22})
self.assertEqual(vis.incar["KSPACING"], 0.22)
self.assertEqual(vis.kpoints, None)
def test_kspacing_override(self):
# If KSPACING is set and user_kpoints_settings are given,
# make sure the user_kpoints_settings override KSPACING
si = self.get_structure("Si")
vis = MPRelaxSet(
si,
user_incar_settings={"KSPACING": 0.22},
user_kpoints_settings={"reciprocal_density": 1000},
)
self.assertEqual(vis.incar.get("KSPACING"), None)
self.assertIsInstance(vis.kpoints, Kpoints)
def test_override_from_prev_calc(self):
# test override_from_prev
prev_run = self.TEST_FILES_DIR / "relaxation"
vis = MPStaticSet(_dummy_structure)
vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["NSW"], 0)
self.assertEqual(vis.incar["ENCUT"], 600)
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Monkhorst)
# Check LCALCPOL flag
lcalcpol_vis = MPStaticSet(_dummy_structure, lcalcpol=True)
lcalcpol_vis = lcalcpol_vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertTrue(lcalcpol_vis.incar["LCALCPOL"])
def test_standardize_structure(self):
sga = SpacegroupAnalyzer(self.get_structure("Si"))
original_structure = sga.get_conventional_standard_structure()
sm = StructureMatcher(primitive_cell=False, scale=False)
vis = MPStaticSet(original_structure)
self.assertTrue(sm.fit(vis.structure, original_structure))
vis = MPStaticSet(original_structure, standardize=True)
self.assertFalse(sm.fit(vis.structure, original_structure))
def test_write_input_zipped(self):
vis = MPStaticSet(self.get_structure("Si"))
vis.write_input(output_dir=".", potcar_spec=True, zip_output=True)
self.assertTrue(os.path.exists("MPStaticSet.zip"))
with ZipFile("MPStaticSet.zip", "r") as zip:
contents = zip.namelist()
self.assertSetEqual(
set(contents), {"INCAR", "POSCAR", "POTCAR.spec", "KPOINTS"}
)
spec = zip.open("POTCAR.spec", "r").read().decode()
self.assertEqual(spec, "Si")
os.remove("MPStaticSet.zip")
def test_conflicting_arguments(self):
with pytest.raises(ValueError, match="deprecated"):
si = self.get_structure("Si")
vis = MPStaticSet(si, potcar_functional="PBE", user_potcar_functional="PBE")
def tearDown(self):
shutil.rmtree(self.tmp)
warnings.simplefilter("default")
class MPNonSCFSetTest(PymatgenTest):
def setUp(self):
self.tmp = tempfile.mkdtemp()
warnings.simplefilter("ignore")
def test_init(self):
prev_run = self.TEST_FILES_DIR / "relaxation"
# check boltztrap mode
vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Boltztrap")
self.assertEqual(vis.incar["ISMEAR"], 0)
# check uniform mode
vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform")
self.assertEqual(vis.incar["ISMEAR"], -5)
self.assertEqual(vis.incar["ISYM"], 2)
# check uniform mode with automatic nedos
vis = MPNonSCFSet.from_prev_calc(prev_calc_dir=prev_run, mode="Uniform",
nedos=0)
self.assertEqual(vis.incar["NEDOS"], 12217)
# test line mode
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run,
mode="Line",
copy_chgcar=False,
user_incar_settings={"SIGMA": 0.025},
)
self.assertEqual(vis.incar["NSW"], 0)
# Check that the ENCUT has been inherited.
self.assertEqual(vis.incar["ENCUT"], 600)
# Check that the user_incar_settings works
self.assertEqual(vis.incar["SIGMA"], 0.025)
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)
# Check as from dict.
vis = MPNonSCFSet.from_dict(vis.as_dict())
self.assertEqual(vis.incar["NSW"], 0)
# Check that the ENCUT has been inherited.
self.assertEqual(vis.incar["ENCUT"], 600)
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)
vis.write_input(self.tmp)
self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run, mode="Line", copy_chgcar=True
)
# check ISMEAR set correctly for line mode
self.assertEqual(vis.incar["ISMEAR"], 0)
vis.write_input(self.tmp)
self.assertTrue(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
os.remove(os.path.join(self.tmp, "CHGCAR"))
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run, standardize=True, mode="Line", copy_chgcar=True
)
vis.write_input(self.tmp)
self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
def test_override_from_prev(self):
prev_run = self.TEST_FILES_DIR / "relaxation"
# test override_from_prev
vis = MPNonSCFSet(_dummy_structure, mode="Boltztrap")
vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["ISMEAR"], 0)
vis = MPNonSCFSet(_dummy_structure, mode="Uniform")
vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["ISMEAR"], -5)
self.assertEqual(vis.incar["ISYM"], 2)
vis = MPNonSCFSet(_dummy_structure, mode="Uniform", nedos=0)
vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["NEDOS"], 12217)
# test line mode
vis = MPNonSCFSet(
_dummy_structure,
mode="Line",
copy_chgcar=False,
user_incar_settings={"SIGMA": 0.025},
)
vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["NSW"], 0)
self.assertEqual(vis.incar["ENCUT"], 600)
self.assertEqual(vis.incar["SIGMA"], 0.025)
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Reciprocal)
vis = MPNonSCFSet(_dummy_structure, mode="Line", copy_chgcar=True)
vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["ISMEAR"], 0)
vis.write_input(self.tmp)
self.assertTrue(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
os.remove(os.path.join(self.tmp, "CHGCAR"))
vis = MPNonSCFSet(
_dummy_structure, standardize=True, mode="Line", copy_chgcar=True
)
vis.override_from_prev_calc(prev_calc_dir=prev_run)
vis.write_input(self.tmp)
self.assertFalse(os.path.exists(os.path.join(self.tmp, "CHGCAR")))
def test_kpoints(self):
# test k-points are generated in the correct format
prev_run = self.TEST_FILES_DIR / "relaxation"
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run, mode="Uniform", copy_chgcar=False
)
self.assertEqual(np.array(vis.kpoints.kpts).shape, (1, 3))
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run, mode="Line", copy_chgcar=False
)
self.assertNotEqual(np.array(vis.kpoints.kpts).shape, (1, 3))
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run, mode="Boltztrap", copy_chgcar=False
)
self.assertNotEqual(np.array(vis.kpoints.kpts).shape, (1, 3))
def test_optics(self):
prev_run = self.TEST_FILES_DIR / "relaxation"
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run,
copy_chgcar=False,
optics=True,
mode="Uniform",
nedos=2001,
)
self.assertEqual(vis.incar["NSW"], 0)
# Check that the ENCUT has been inherited.
self.assertEqual(vis.incar["ENCUT"], 600)
# check NEDOS and ISMEAR set correctly
self.assertEqual(vis.incar["NEDOS"], 2001)
self.assertEqual(vis.incar["ISMEAR"], -5)
self.assertEqual(vis.incar["ISYM"], 2)
self.assertTrue(vis.incar["LOPTICS"])
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Gamma)
def test_user_kpoint_override(self):
user_kpoints_override = Kpoints(
style=Kpoints.supported_modes.Gamma, kpts=((1, 1, 1),)
) # the default kpoints style is reciprocal
prev_run = self.TEST_FILES_DIR / "relaxation"
vis = MPNonSCFSet.from_prev_calc(
prev_calc_dir=prev_run,
copy_chgcar=False,
optics=True,
mode="Uniform",
nedos=2001,
user_kpoints_settings=user_kpoints_override,
)
self.assertEqual(vis.kpoints.style, Kpoints.supported_modes.Gamma)
def tearDown(self):
shutil.rmtree(self.tmp)
warnings.simplefilter("default")
class MagmomLdauTest(PymatgenTest):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_structure_from_prev_run(self):
vrun = Vasprun(self.TEST_FILES_DIR / "vasprun.xml.magmom_ldau")
structure = vrun.final_structure
poscar = Poscar(structure)
structure_decorated = get_structure_from_prev_run(vrun)
ldau_ans = {"LDAUU": [5.3, 0.0], "LDAUL": [2, 0], "LDAUJ": [0.0, 0.0]}
magmom_ans = [5.0, 5.0, 5.0, 5.0, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6]
ldau_dict = {}
for key in ("LDAUU", "LDAUJ", "LDAUL"):
if hasattr(structure_decorated[0], key.lower()):
m = dict(
[
(site.specie.symbol, getattr(site, key.lower()))
for site in structure_decorated
]
)
ldau_dict[key] = [m[sym] for sym in poscar.site_symbols]
magmom = [site.magmom for site in structure_decorated]
self.assertEqual(ldau_dict, ldau_ans)
self.assertEqual(magmom, magmom_ans)
def test_ln_magmom(self):
YAML_PATH = os.path.join(os.path.dirname(__file__), "../VASPIncarBase.yaml")
MAGMOM_SETTING = loadfn(YAML_PATH)["INCAR"]["MAGMOM"]
structure = Structure.from_file(self.TEST_FILES_DIR / "La4Fe4O12.cif")
structure.add_oxidation_state_by_element({"La": +3, "Fe": +3, "O": -2})
for ion in MAGMOM_SETTING:
s = structure.copy()
s.replace_species({"La3+": ion})
vis = MPRelaxSet(s)
fe_pos = vis.poscar.comment.index("Fe")
if fe_pos == 0:
magmom_ans = [5] * 4 + [MAGMOM_SETTING[ion]] * 4 + [0.6] * 12
else:
magmom_ans = [MAGMOM_SETTING[ion]] * 4 + [5] * 4 + [0.6] * 12
self.assertEqual(vis.incar["MAGMOM"], magmom_ans)
class MITMDSetTest(PymatgenTest):
def setUp(self):
filepath = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(filepath)
self.struct = poscar.structure
self.mitmdparam = MITMDSet(self.struct, 300, 1200, 10000)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_params(self):
param = self.mitmdparam
syms = param.potcar_symbols
self.assertEqual(syms, ["Fe", "P", "O"])
incar = param.incar
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar["EDIFF"], 1e-5)
kpoints = param.kpoints
self.assertEqual(kpoints.kpts, [(1, 1, 1)])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_as_from_dict(self):
d = self.mitmdparam.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MITMDSet)
self.assertEqual(v._config_dict["INCAR"]["TEBEG"], 300)
self.assertEqual(v._config_dict["INCAR"]["PREC"], "Low")
class MVLNPTMDSetTest(PymatgenTest):
def setUp(self):
file_path = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(file_path)
self.struct = poscar.structure
self.mvl_npt_set = MVLNPTMDSet(
self.struct, start_temp=0, end_temp=300, nsteps=1000
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_incar(self):
npt_set = self.mvl_npt_set
syms = npt_set.potcar_symbols
self.assertEqual(syms, ["Fe", "P", "O"])
incar = npt_set.incar
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar["EDIFF"], 1e-5)
self.assertEqual(incar["LANGEVIN_GAMMA_L"], 1)
self.assertEqual(incar["LANGEVIN_GAMMA"], [10, 10, 10])
enmax = max(
[npt_set.potcar[i].keywords["ENMAX"] for i in range(self.struct.ntypesp)]
)
self.assertAlmostEqual(incar["ENCUT"], 1.5 * enmax)
self.assertEqual(incar["IALGO"], 48)
self.assertEqual(incar["ISIF"], 3)
self.assertEqual(incar["MDALGO"], 3)
self.assertEqual(incar["SMASS"], 0)
self.assertEqual(incar["PREC"], "Low")
kpoints = npt_set.kpoints
self.assertEqual(kpoints.kpts, [(1, 1, 1)])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
def test_as_from_dict(self):
d = self.mvl_npt_set.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MVLNPTMDSet)
self.assertEqual(v._config_dict["INCAR"]["NSW"], 1000)
class MITNEBSetTest(PymatgenTest):
def setUp(self):
c1 = [[0.5] * 3, [0.9] * 3]
c2 = [[0.5] * 3, [0.9, 0.1, 0.1]]
s1 = Structure(Lattice.cubic(5), ["Si", "Si"], c1)
s2 = Structure(Lattice.cubic(5), ["Si", "Si"], c2)
structs = []
for s in s1.interpolate(s2, 3, pbc=True):
structs.append(Structure.from_sites(s.sites, to_unit_cell=True))
self.structures = structs
self.vis = MITNEBSet(self.structures)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_potcar_symbols(self):
syms = self.vis.potcar_symbols
self.assertEqual(syms, ["Si"])
def test_incar(self):
incar = self.vis.incar
self.assertNotIn("LDAUU", incar)
self.assertAlmostEqual(incar["EDIFF"], 0.00001)
def test_kpoints(self):
kpoints = self.vis.kpoints
self.assertEqual(kpoints.kpts, [[25]])
self.assertEqual(kpoints.style, Kpoints.supported_modes.Automatic)
def test_as_from_dict(self):
d = self.vis.as_dict()
v = dec.process_decoded(d)
self.assertEqual(v._config_dict["INCAR"]["IMAGES"], 2)
def test_write_input(self):
self.vis.write_input(
".", write_cif=True, write_endpoint_inputs=True, write_path_cif=True
)
self.assertTrue(os.path.exists("INCAR"))
self.assertTrue(os.path.exists("KPOINTS"))
self.assertTrue(os.path.exists("POTCAR"))
self.assertTrue(os.path.exists("00/POSCAR"))
self.assertTrue(os.path.exists("01/POSCAR"))
self.assertTrue(os.path.exists("02/POSCAR"))
self.assertTrue(os.path.exists("03/POSCAR"))
self.assertFalse(os.path.exists("04/POSCAR"))
self.assertTrue(os.path.exists("00/INCAR"))
self.assertTrue(os.path.exists("path.cif"))
for d in ["00", "01", "02", "03"]:
shutil.rmtree(d)
for f in ["INCAR", "KPOINTS", "POTCAR", "path.cif"]:
os.remove(f)
class MPSOCSetTest(PymatgenTest):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_from_prev_calc(self):
prev_run = self.TEST_FILES_DIR / "fe_monomer"
vis = MPSOCSet.from_prev_calc(
prev_calc_dir=prev_run,
magmom=[3],
saxis=(1, 0, 0),
user_incar_settings={"SIGMA": 0.025},
)
self.assertEqual(vis.incar["ISYM"], -1)
self.assertTrue(vis.incar["LSORBIT"])
self.assertEqual(vis.incar["ICHARG"], 11)
self.assertEqual(vis.incar["SAXIS"], [1, 0, 0])
self.assertEqual(vis.incar["MAGMOM"], [[0, 0, 3]])
self.assertEqual(vis.incar["SIGMA"], 0.025)
def test_override_from_prev_calc(self):
# test override_from_prev_calc
prev_run = self.TEST_FILES_DIR / "fe_monomer"
vis = MPSOCSet(
_dummy_structure,
magmom=[3],
saxis=(1, 0, 0),
user_incar_settings={"SIGMA": 0.025},
)
vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(vis.incar["ISYM"], -1)
self.assertTrue(vis.incar["LSORBIT"])
self.assertEqual(vis.incar["ICHARG"], 11)
self.assertEqual(vis.incar["SAXIS"], [1, 0, 0])
self.assertEqual(vis.incar["MAGMOM"], [[0, 0, 3]])
self.assertEqual(vis.incar["SIGMA"], 0.025)
class MPNMRSetTest(PymatgenTest):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_incar(self):
filepath = self.TEST_FILES_DIR / "Li.cif"
structure = Structure.from_file(filepath)
vis = MPNMRSet(structure)
self.assertTrue(vis.incar.get("LCHIMAG", None))
self.assertEqual(vis.incar.get("QUAD_EFG", None), None)
vis = MPNMRSet(structure, mode="efg")
self.assertFalse(vis.incar.get("LCHIMAG", None))
self.assertEqual(vis.incar.get("QUAD_EFG", None), [-0.808])
vis = MPNMRSet(structure, mode="efg", isotopes=["Li-7"])
self.assertFalse(vis.incar.get("LCHIMAG", None))
self.assertEqual(vis.incar.get("QUAD_EFG", None), [-40.1])
class MVLSlabSetTest(PymatgenTest):
def setUp(self):
s = self.get_structure("Li2O")
gen = SlabGenerator(s, (1, 0, 0), 10, 10)
self.slab = gen.get_slab()
self.bulk = self.slab.oriented_unit_cell
vis_bulk = MVLSlabSet(self.bulk, bulk=True)
vis = MVLSlabSet(self.slab)
vis_dipole = MVLSlabSet(self.slab, auto_dipole=True)
self.d_bulk = vis_bulk.get_vasp_input()
self.d_slab = vis.get_vasp_input()
self.d_dipole = vis_dipole.get_vasp_input()
self.vis = vis
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_user_incar_settings(self):
# Make sure user incar settings properly override AMIX.
si = self.get_structure("Si")
vis = MVLSlabSet(si, user_incar_settings={"AMIX": 0.1})
self.assertEqual(vis.incar["AMIX"], 0.1)
def test_bulk(self):
incar_bulk = self.d_bulk["INCAR"]
poscar_bulk = self.d_bulk["POSCAR"]
self.assertEqual(incar_bulk["ISIF"], 3)
self.assertEqual(incar_bulk["EDIFF"], 1e-4)
self.assertEqual(incar_bulk["EDIFFG"], -0.02)
self.assertEqual(poscar_bulk.structure.formula, self.bulk.formula)
def test_slab(self):
incar_slab = self.d_slab["INCAR"]
poscar_slab = self.d_slab["POSCAR"]
potcar_slab = self.d_slab["POTCAR"]
self.assertEqual(incar_slab["AMIN"], 0.01)
self.assertEqual(incar_slab["AMIX"], 0.2)
self.assertEqual(incar_slab["BMIX"], 0.001)
self.assertEqual(incar_slab["NELMIN"], 8)
# No volume relaxation during slab calculations
self.assertEqual(incar_slab["ISIF"], 2)
self.assertEqual(potcar_slab.functional, "PBE")
self.assertEqual(potcar_slab.symbols[1], u"O")
self.assertEqual(potcar_slab.symbols[0], u"Li_sv")
self.assertEqual(poscar_slab.structure.formula, self.slab.formula)
# Test auto-dipole
dipole_incar = self.d_dipole["INCAR"]
self.assertTrue(dipole_incar["LDIPOL"])
self.assertArrayAlmostEqual(
dipole_incar["DIPOL"], [0.2323, 0.2323, 0.2165], decimal=4
)
self.assertEqual(dipole_incar["IDIPOL"], 3)
def test_kpoints(self):
kpoints_slab = self.d_slab["KPOINTS"].kpts[0]
kpoints_bulk = self.d_bulk["KPOINTS"].kpts[0]
self.assertEqual(kpoints_bulk[0], kpoints_slab[0])
self.assertEqual(kpoints_bulk[1], kpoints_slab[1])
self.assertEqual(kpoints_bulk[0], 15)
self.assertEqual(kpoints_bulk[1], 15)
self.assertEqual(kpoints_bulk[2], 15)
# The last kpoint in a slab should always be 1
self.assertEqual(kpoints_slab[2], 1)
def test_as_dict(self):
vis_dict = self.vis.as_dict()
new = MVLSlabSet.from_dict(vis_dict)
class MVLElasticSetTest(PymatgenTest):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_incar(self):
mvlparam = MVLElasticSet(self.get_structure("Graphite"))
incar = mvlparam.incar
self.assertEqual(incar["IBRION"], 6)
self.assertEqual(incar["NFREE"], 2)
self.assertEqual(incar["POTIM"], 0.015)
self.assertNotIn("NPAR", incar)
class MVLGWSetTest(PymatgenTest):
def setUp(self):
self.tmp = tempfile.mkdtemp()
self.s = PymatgenTest.get_structure("Li2O")
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
shutil.rmtree(self.tmp)
def test_static(self):
mvlgwsc = MVLGWSet(self.s)
incar = mvlgwsc.incar
self.assertEqual(incar["SIGMA"], 0.01)
kpoints = mvlgwsc.kpoints
self.assertEqual(kpoints.style, Kpoints.supported_modes.Gamma)
symbols = mvlgwsc.potcar.symbols
self.assertEqual(symbols, ["Li_sv_GW", "O_GW"])
def test_diag(self):
prev_run = self.TEST_FILES_DIR / "relaxation"
mvlgwdiag = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=True, mode="diag")
mvlgwdiag.write_input(self.tmp)
self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
self.assertEqual(mvlgwdiag.incar["NBANDS"], 32)
self.assertEqual(mvlgwdiag.incar["ALGO"], "Exact")
self.assertTrue(mvlgwdiag.incar["LOPTICS"])
# test override_from_prev_calc
mvlgwdiag = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="diag")
mvlgwdiag.override_from_prev_calc(prev_calc_dir=prev_run)
mvlgwdiag.write_input(self.tmp)
self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
self.assertEqual(mvlgwdiag.incar["NBANDS"], 32)
self.assertEqual(mvlgwdiag.incar["ALGO"], "Exact")
self.assertTrue(mvlgwdiag.incar["LOPTICS"])
def test_bse(self):
prev_run = self.TEST_FILES_DIR / "relaxation"
mvlgwgbse = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=True, mode="BSE")
mvlgwgbse.write_input(self.tmp)
self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVEDER")))
prev_run = self.TEST_FILES_DIR / "relaxation"
mvlgwgbse = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=False, mode="GW")
self.assertEqual(mvlgwgbse.incar["NOMEGA"], 80)
self.assertEqual(mvlgwgbse.incar["ENCUTGW"], 250)
self.assertEqual(mvlgwgbse.incar["ALGO"], "GW0")
mvlgwgbse1 = MVLGWSet.from_prev_calc(prev_run, copy_wavecar=False, mode="BSE")
self.assertEqual(mvlgwgbse1.incar["ANTIRES"], 0)
self.assertEqual(mvlgwgbse1.incar["NBANDSO"], 20)
self.assertEqual(mvlgwgbse1.incar["ALGO"], "BSE")
# test override_from_prev_calc
prev_run = self.TEST_FILES_DIR / "relaxation"
mvlgwgbse = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="BSE")
mvlgwgbse.override_from_prev_calc(prev_calc_dir=prev_run)
mvlgwgbse.write_input(self.tmp)
self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVECAR")))
self.assertTrue(os.path.exists(os.path.join(self.tmp, "WAVEDER")))
prev_run = self.TEST_FILES_DIR / "relaxation"
mvlgwgbse = MVLGWSet(_dummy_structure, copy_wavecar=True, mode="GW")
mvlgwgbse.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(mvlgwgbse.incar["NOMEGA"], 80)
self.assertEqual(mvlgwgbse.incar["ENCUTGW"], 250)
self.assertEqual(mvlgwgbse.incar["ALGO"], "GW0")
mvlgwgbse1 = MVLGWSet(_dummy_structure, copy_wavecar=False, mode="BSE")
mvlgwgbse1.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertEqual(mvlgwgbse1.incar["ANTIRES"], 0)
self.assertEqual(mvlgwgbse1.incar["NBANDSO"], 20)
self.assertEqual(mvlgwgbse1.incar["ALGO"], "BSE")
class MPHSEBSTest(PymatgenTest):
def setUp(self):
self.tmp = tempfile.mkdtemp()
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
prev_run = self.TEST_FILES_DIR / "static_silicon"
vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="uniform")
self.assertTrue(vis.incar["LHFCALC"])
self.assertEqual(len(vis.kpoints.kpts), 16)
vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="gap")
self.assertTrue(vis.incar["LHFCALC"])
self.assertEqual(len(vis.kpoints.kpts), 18)
vis = MPHSEBSSet.from_prev_calc(prev_calc_dir=prev_run, mode="line")
self.assertTrue(vis.incar["LHFCALC"])
self.assertEqual(vis.incar["HFSCREEN"], 0.2)
self.assertEqual(vis.incar["NSW"], 0)
self.assertEqual(vis.incar["ISYM"], 3)
self.assertEqual(len(vis.kpoints.kpts), 180)
def test_override_from_prev_calc(self):
prev_run = self.TEST_FILES_DIR / "static_silicon"
vis = MPHSEBSSet(_dummy_structure, mode="uniform")
vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertTrue(vis.incar["LHFCALC"])
self.assertEqual(len(vis.kpoints.kpts), 16)
vis = MPHSEBSSet(_dummy_structure, mode="gap")
vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertTrue(vis.incar["LHFCALC"])
self.assertEqual(len(vis.kpoints.kpts), 18)
vis = MPHSEBSSet(_dummy_structure, mode="line")
vis = vis.override_from_prev_calc(prev_calc_dir=prev_run)
self.assertTrue(vis.incar["LHFCALC"])
self.assertEqual(vis.incar["HFSCREEN"], 0.2)
self.assertEqual(vis.incar["NSW"], 0)
self.assertEqual(vis.incar["ISYM"], 3)
self.assertEqual(len(vis.kpoints.kpts), 180)
class MVLScanRelaxSetTest(PymatgenTest):
def setUp(self):
file_path = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(file_path)
self.struct = poscar.structure
self.mvl_scan_set = MVLScanRelaxSet(
self.struct, potcar_functional="PBE_52", user_incar_settings={"NSW": 500}
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_incar(self):
incar = self.mvl_scan_set.incar
self.assertIn("METAGGA", incar)
self.assertIn("LASPH", incar)
self.assertIn("ADDGRID", incar)
self.assertEqual(incar["NSW"], 500)
# Test SCAN+rVV10
scan_rvv10_set = MVLScanRelaxSet(self.struct, vdw="rVV10")
self.assertEqual(scan_rvv10_set.incar["BPARAM"], 15.7)
def test_potcar(self):
self.assertEqual(self.mvl_scan_set.potcar.functional, "PBE_52")
test_potcar_set_1 = MVLScanRelaxSet(self.struct, potcar_functional="PBE_54")
self.assertEqual(test_potcar_set_1.potcar.functional, "PBE_54")
self.assertRaises(
ValueError, MVLScanRelaxSet, self.struct, potcar_functional="PBE"
)
def test_as_from_dict(self):
d = self.mvl_scan_set.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MVLScanRelaxSet)
self.assertEqual(v._config_dict["INCAR"]["METAGGA"], "SCAN")
self.assertEqual(v.user_incar_settings["NSW"], 500)
class MPScanRelaxSetTest(PymatgenTest):
def setUp(self):
file_path = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(file_path)
self.struct = poscar.structure
self.mp_scan_set = MPScanRelaxSet(
self.struct, potcar_functional="PBE_52", user_incar_settings={"NSW": 500}
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_incar(self):
incar = self.mp_scan_set.incar
self.assertIn("METAGGA", incar)
self.assertIn("LASPH", incar)
self.assertIn("ADDGRID", incar)
self.assertEqual(incar["NSW"], 500)
# by default the structure is treated as metallic (bandgap = 0)
self.assertEqual(incar["KSPACING"], 0.22)
self.assertEqual(incar["ISMEAR"], 2)
self.assertEqual(incar["SIGMA"], 0.2)
def test_nonmetal(self):
# Test that KSPACING and ISMEAR change with a nonmetal structure
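# (With a nonzero bandgap, MPScanRelaxSet widens KSPACING beyond the metallic
# default of 0.22 and switches to the tetrahedron method, ISMEAR = -5.)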
file_path = self.TEST_FILES_DIR / "POSCAR.O2"
struct = Poscar.from_file(file_path, check_for_POTCAR=False).structure
scan_nonmetal_set = MPScanRelaxSet(struct, bandgap=1.1)
incar = scan_nonmetal_set.incar
self.assertAlmostEqual(incar["KSPACING"], 0.29125, places=5)
self.assertEqual(incar["ISMEAR"], -5)
self.assertEqual(incar["SIGMA"], 0.05)
def test_incar_overrides(self):
# use 'user_incar_settings' to override the KSPACING, ISMEAR, and SIGMA
# parameters that MPScanSet normally determines
mp_scan_set2 = MPScanRelaxSet(
self.struct,
user_incar_settings={"KSPACING": 0.5, "ISMEAR": 0, "SIGMA": 0.05},
)
incar = mp_scan_set2.incar
self.assertEqual(incar["KSPACING"], 0.5)
self.assertEqual(incar["ISMEAR"], 0)
self.assertEqual(incar["SIGMA"], 0.05)
# Test SCAN+rVV10
def test_rvv10(self):
scan_rvv10_set = MPScanRelaxSet(self.struct, vdw="rVV10")
self.assertIn("LUSE_VDW", scan_rvv10_set.incar)
self.assertEqual(scan_rvv10_set.incar["BPARAM"], 15.7)
def test_other_vdw(self):
# should raise a warning.
# IVDW key should not be present in the incar
with pytest.warns(UserWarning, match=r"not supported at this time"):
scan_vdw_set = MPScanRelaxSet(self.struct, vdw="DFTD3")
self.assertNotIn("LUSE_VDW", scan_vdw_set.incar)
self.assertNotIn("IVDW", scan_vdw_set.incar)
def test_potcar(self):
self.assertEqual(self.mp_scan_set.potcar.functional, "PBE_52")
# the default functional should be PBE_54
test_potcar_set_1 = MPScanRelaxSet(self.struct)
self.assertEqual(test_potcar_set_1.potcar.functional, "PBE_54")
self.assertRaises(
ValueError, MPScanRelaxSet, self.struct, potcar_functional="PBE"
)
def test_as_from_dict(self):
d = self.mp_scan_set.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MPScanRelaxSet)
self.assertEqual(v._config_dict["INCAR"]["METAGGA"], "SCAN")
self.assertEqual(v.user_incar_settings["NSW"], 500)
def test_write_input(self):
self.mp_scan_set.write_input(
"."
)
self.assertTrue(os.path.exists("INCAR"))
self.assertFalse(os.path.exists("KPOINTS"))
self.assertTrue(os.path.exists("POTCAR"))
self.assertTrue(os.path.exists("POSCAR"))
for f in ["INCAR", "POSCAR", "POTCAR"]:
os.remove(f)
class FuncTest(PymatgenTest):
def test_batch_write_input(self):
structures = [
PymatgenTest.get_structure("Li2O"),
PymatgenTest.get_structure("LiFePO4"),
]
batch_write_input(structures)
for d in ["Li4Fe4P4O16_1", "Li2O1_0"]:
for f in ["INCAR", "KPOINTS", "POSCAR", "POTCAR"]:
self.assertTrue(os.path.exists(os.path.join(d, f)))
for d in ["Li4Fe4P4O16_1", "Li2O1_0"]:
shutil.rmtree(d)
class MVLGBSetTest(PymatgenTest):
def setUp(self):
filepath = self.TEST_FILES_DIR / "Li.cif"
self.s = Structure.from_file(filepath)
self.bulk = MVLGBSet(self.s)
self.slab = MVLGBSet(self.s, slab_mode=True)
self.d_bulk = self.bulk.get_vasp_input()
self.d_slab = self.slab.get_vasp_input()
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_bulk(self):
incar_bulk = self.d_bulk["INCAR"]
self.assertEqual(incar_bulk["ISIF"], 3)
def test_slab(self):
incar_slab = self.d_slab["INCAR"]
self.assertEqual(incar_slab["ISIF"], 2)
def test_kpoints(self):
kpoints = self.d_slab["KPOINTS"]
k_a = int(40 / (self.s.lattice.abc[0]) + 0.5)
k_b = int(40 / (self.s.lattice.abc[1]) + 0.5)
self.assertEqual(kpoints.kpts, [[k_a, k_b, 1]])
class MVLRelax52SetTest(PymatgenTest):
def setUp(self):
file_path = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(file_path)
self.struct = poscar.structure
self.mvl_rlx_set = MVLRelax52Set(
self.struct, potcar_functional="PBE_54", user_incar_settings={"NSW": 500}
)
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_incar(self):
incar = self.mvl_rlx_set.incar
self.assertIn("NSW", incar)
self.assertEqual(incar["LREAL"], "Auto")
def test_potcar(self):
self.assertEqual(self.mvl_rlx_set.potcar.functional, "PBE_54")
self.assertIn("Fe", self.mvl_rlx_set.potcar.symbols)
self.struct.remove_species(["Fe"])
test_potcar_set_1 = MVLRelax52Set(self.struct, potcar_functional="PBE_52")
self.assertEqual(test_potcar_set_1.potcar.functional, "PBE_52")
self.assertRaises(
ValueError, MVLRelax52Set, self.struct, potcar_functional="PBE"
)
def test_potcar_functional_warning(self):
with pytest.warns(DeprecationWarning, match="argument is deprecated"):
test_potcar_set_1 = MVLRelax52Set(self.struct, potcar_functional="PBE_52")
def test_as_from_dict(self):
d = self.mvl_rlx_set.as_dict()
v = dec.process_decoded(d)
self.assertEqual(type(v), MVLRelax52Set)
self.assertEqual(v.incar["NSW"], 500)
class LobsterSetTest(PymatgenTest):
# TODO: what kind of tests should I write for this?
def setUp(self):
file_path = self.TEST_FILES_DIR / "POSCAR"
poscar = Poscar.from_file(file_path)
self.struct = poscar.structure
# test for different parameters!
self.lobsterset1 = LobsterSet(self.struct, isym=-1, ismear=-5)
self.lobsterset2 = LobsterSet(self.struct, isym=0, ismear=0)
# only allow isym=-1 and isym=0
with self.assertRaises(ValueError):
self.lobsterset_new = LobsterSet(self.struct, isym=2, ismear=0)
# only allow ismear=-5 and ismear=0
with self.assertRaises(ValueError):
self.lobsterset_new = LobsterSet(self.struct, isym=-1, ismear=2)
# test if one can still hand over grid density of kpoints
self.lobsterset3 = LobsterSet(
self.struct, isym=0, ismear=0, user_kpoints_settings={"grid_density": 6000}
)
# check if users can overwrite settings in this class with the help of user_incar_settings
self.lobsterset4 = LobsterSet(self.struct, user_incar_settings={"ALGO": "Fast"})
# use basis functions supplied by user
self.lobsterset5 = LobsterSet(
self.struct,
user_supplied_basis={"Fe": "3d 3p 4s", "P": "3p 3s", "O": "2p 2s"},
)
with self.assertRaises(ValueError):
self.lobsterset6 = LobsterSet(
self.struct, user_supplied_basis={"Fe": "3d 3p 4s", "P": "3p 3s"}
)
self.lobsterset7 = LobsterSet(
self.struct,
address_basis_file=os.path.join(MODULE_DIR, "../../BASIS_PBE_54.yaml"),
)
with pytest.warns(BadInputSetWarning, match="Overriding the POTCAR"):
self.lobsterset6 = LobsterSet(self.struct)
def test_incar(self):
incar1 = self.lobsterset1.incar
self.assertIn("NBANDS", incar1)
self.assertEqual(incar1["NBANDS"], 116)
self.assertEqual(incar1["NSW"], 0)
self.assertEqual(incar1["NSW"], 0)
self.assertEqual(incar1["ISMEAR"], -5)
self.assertEqual(incar1["ISYM"], -1)
self.assertEqual(incar1["ALGO"], "Normal")
incar2 = self.lobsterset2.incar
self.assertEqual(incar2["ISYM"], 0)
self.assertEqual(incar2["ISMEAR"], 0)
incar4 = self.lobsterset4.incar
self.assertEqual(incar4["ALGO"], "Fast")
def test_kpoints(self):
kpoints1 = self.lobsterset1.kpoints
        self.assertEqual(int(kpoints1.comment.split(" ")[6]), 6138)
        kpoints2 = self.lobsterset2.kpoints
        self.assertEqual(int(kpoints2.comment.split(" ")[6]), 6138)
        kpoints3 = self.lobsterset3.kpoints
        self.assertEqual(int(kpoints3.comment.split(" ")[6]), 6000)
def test_potcar(self):
# PBE_54 is preferred at the moment
self.assertEqual(self.lobsterset1.potcar_functional, "PBE_54")
def test_as_from_dict(self):
dict_here = self.lobsterset1.as_dict()
lobsterset_new = LobsterSet.from_dict(dict_here)
# test relevant parts again
incar1 = lobsterset_new.incar
self.assertIn("NBANDS", incar1)
self.assertEqual(incar1["NBANDS"], 116)
self.assertEqual(incar1["NSW"], 0)
self.assertEqual(incar1["NSW"], 0)
self.assertEqual(incar1["ISMEAR"], -5)
self.assertEqual(incar1["ISYM"], -1)
self.assertEqual(incar1["ALGO"], "Normal")
kpoints1 = lobsterset_new.kpoints
        self.assertEqual(int(kpoints1.comment.split(" ")[6]), 6138)
self.assertEqual(lobsterset_new.potcar_functional, "PBE_54")
_dummy_structure = Structure(
[1, 0, 0, 0, 1, 0, 0, 0, 1],
["I"],
[[0, 0, 0]],
site_properties={"magmom": [[0, 0, 1]]},
)
if __name__ == "__main__":
unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/io/vasp/tests/test_sets.py
|
Python
|
mit
| 57,559
|
[
"BoltzTrap",
"VASP",
"pymatgen"
] |
aa7830256581503b6342cbe05c9df7f1ed684837a6570f896cb14dcae617d0c0
|
# (C) British Crown Copyright 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Routines for lazy data handling.
To avoid replicating implementation-dependent test and conversion code.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import dask
import dask.array as da
import dask.context
from dask.local import get_sync as dget_sync
import numpy as np
import numpy.ma as ma
def is_lazy_data(data):
"""
Return whether the argument is an Iris 'lazy' data array.
At present, this means simply a Dask array.
We determine this by checking for a "compute" property.
"""
result = hasattr(data, 'compute')
return result
# A magic value, chosen to minimise chunk creation time and chunk processing
# time within dask.
_MAX_CHUNK_SIZE = 8 * 1024 * 1024 * 2
def _limited_shape(shape):
# Reduce a shape to less than a default overall number-of-points, reducing
# earlier dimensions preferentially.
# Note: this is only a heuristic, assuming that earlier dimensions are
# 'outer' storage dimensions -- not *always* true, even for NetCDF data.
shape = list(shape)
i_reduce = 0
while np.prod(shape) > _MAX_CHUNK_SIZE:
factor = np.ceil(np.prod(shape) / _MAX_CHUNK_SIZE)
new_dim = int(shape[i_reduce] / factor)
if new_dim < 1:
new_dim = 1
shape[i_reduce] = new_dim
i_reduce += 1
return tuple(shape)
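# A minimal sketch added for illustration (not part of the original module):
# how _limited_shape trims leading dimensions, assuming the default
# _MAX_CHUNK_SIZE of 16777216 points defined above.
def _example_limited_shape():
    # (100, 1000, 1000) has 1e8 points; factor = ceil(1e8 / 16777216) = 6,
    # so the first ('outer') dimension shrinks from 100 to int(100 / 6) == 16.
    assert _limited_shape((100, 1000, 1000)) == (16, 1000, 1000)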
def _getall(a):
res = a[()]
if isinstance(res, ma.core.MaskedConstant):
res = ma.masked_array(res.data, mask=res.mask)
return res
_getall_delayed = dask.delayed(_getall)
def as_lazy_data(data, chunks=None, asarray=False):
"""
Convert the input array `data` to a dask array.
Args:
* data:
An array. This will be converted to a dask array.
Kwargs:
* chunks:
Describes how the created dask array should be split up. Defaults to a
value first defined in biggus (being `8 * 1024 * 1024 * 2`).
For more information see
http://dask.pydata.org/en/latest/array-creation.html#chunks.
* asarray:
If True, then chunks will be converted to instances of `ndarray`.
        Set to False (default) to pass chunks through unchanged.
Returns:
The input array converted to a dask array.
"""
if chunks is None:
# Default to the shape of the wrapped array-like,
# but reduce it if larger than a default maximum size.
chunks = _limited_shape(data.shape)
if not is_lazy_data(data):
if data.shape == ():
# Workaround for https://github.com/dask/dask/issues/2823. Make
# sure scalar dask arrays return numpy objects.
dtype = data.dtype
data = _getall_delayed(data)
data = da.from_delayed(data, (), dtype)
else:
data = da.from_array(data, chunks=chunks, asarray=asarray)
return data
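# A minimal usage sketch added for illustration (not part of the original
# module): wrapping a small array and a 0-d array as lazy data.
def _example_as_lazy_data():
    real = np.arange(12).reshape(3, 4)
    lazy = as_lazy_data(real)
    assert is_lazy_data(lazy) and not is_lazy_data(real)
    # A 0-d input exercises the scalar workaround above.
    scalar = as_lazy_data(np.array(3.5))
    assert is_lazy_data(scalar) and scalar.shape == ()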
def as_concrete_data(data):
"""
Return the actual content of a lazy array, as a numpy array.
If the input data is a NumPy `ndarray` or masked array, return it
unchanged.
If the input data is lazy, return the realised result.
Args:
* data:
A dask array, NumPy `ndarray` or masked array
Returns:
A NumPy `ndarray` or masked array.
"""
if is_lazy_data(data):
# Realise dask array, ensuring the data result is always a NumPy array.
# In some cases dask may return a scalar numpy.int/numpy.float object
# rather than a numpy.ndarray object.
# Recorded in https://github.com/dask/dask/issues/2111.
data = np.asanyarray(data.compute())
return data
def multidim_lazy_stack(stack):
"""
Recursively build a multidimensional stacked dask array.
This is needed because dask.array.stack only accepts a 1-dimensional list.
Args:
* stack:
An ndarray of dask arrays.
Returns:
The input array converted to a lazy dask array.
"""
if stack.ndim == 0:
# A 0-d array cannot be stacked.
result = stack.item()
elif stack.ndim == 1:
# Another base case : simple 1-d goes direct in dask.
result = da.stack(list(stack))
else:
# Recurse because dask.stack does not do multi-dimensional.
result = da.stack([multidim_lazy_stack(subarray)
for subarray in stack])
return result
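# A minimal usage sketch added for illustration (not part of the original
# module): stacking a 2x2 object array of lazy 1-d arrays into one lazy
# (2, 2, 3) array.
def _example_multidim_lazy_stack():
    grid = np.empty((2, 2), dtype=object)
    for i in range(2):
        for j in range(2):
            grid[i, j] = da.from_array(np.arange(3), chunks=(3,))
    stacked = multidim_lazy_stack(grid)
    assert stacked.shape == (2, 2, 3)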
|
QuLogic/iris
|
lib/iris/_lazy_data.py
|
Python
|
gpl-3.0
| 5,137
|
[
"NetCDF"
] |
9ffd1a7480a5437110aac5be9e26f22dcffd66c9200926df58e7e386a0d573e6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import branch_bound
np = branch_bound.np
MAX_BRANCHES = 10000
# This could be used if more splits are wanted than are possible
infState = branch_bound.InfState()
class OTSPstate:
def __init__(self,d,order,nagents,visit2agent=[0],time=[0.],lastat=None,):
'''
d: distance matrix
order: order in which nodes must be visited
time: time at which the nodes were visited
lastat[i,j]: the node where agent j most recently was at time[i]
nagents: number of agents
visit2agent[i] is the agent who makes visit i
'''
# This is the root
        if lastat is None:
self.n = d.shape[0]
lastat = [[None]*nagents]
lastat[0][0] = 0
# Make a "start location" that is at distance 0 from everywhere
# Using index -1 for undeployed agents puts them at this "start"
else:
# "start" location has already been added but doesn't count
self.n = d.shape[0] - 1
self.d = d
self.lastat = lastat
self.order = order
self.nagents = nagents
self.visit2agent = visit2agent
self.time = time
        self.m = len(time) # number of visits that have already been made
self.value = self.time[-1]
def agentsNewTime(self,agent):
# The time at which this agent could make the next visit
# The index of the last visit this agent made
lastvisit = self.lastat[-1][agent]
# print len(self.time)
# print self.d.shape
# print 'agent',agent
# print ' lastvisit',lastvisit
# Assume agent's initial deployment is instantaneous
        if lastvisit is None:
return self.time[-1]
# The node at which agent made his last visit
lastpos = self.order[lastvisit]
# The time at which agent was at lastpos
lasttime = self.time[lastvisit]
# The node that needs to be visited next
nextpos = self.order[self.m]
# print ' lastat',self.lastat
# print ' lastpos',lastpos
# print ' lasttime',lasttime
# print ' nextpos',nextpos
        # He makes the visit either at the same time as the previous visit
        # or as soon as he arrives at nextpos
        return max(self.time[-1], lasttime + self.d[nextpos, lastpos])
def split(self,num):
'''
num: number of child states to produce
(in the easiest case, this is the same as nagents)
produces self.children states
'''
if self.m >= len(self.order):
raise branch_bound.CantSplit()
self.children = []
for agent in range(self.nagents):
newtime = self.agentsNewTime(agent)
# Everyone's last known position is the same, except that agent is now at m
newlast = list(self.lastat[-1])
newlast[agent] = self.m
self.children.append(OTSPstate(self.d,self.order,self.nagents,\
self.visit2agent+[agent],\
self.time+[newtime],\
self.lastat+[newlast],\
))
if num < self.nagents:
childorder = np.argsort([ child.value for child in self.children ])
self.children = np.array(self.children)
self.children = self.children[childorder[:num]]
def calcTimes(self):
'''
Calculates self.time and self.lastat
Uses data from self.d and self.visit2agent
Assumes self.time[0] should be 0
self.time and self.lastat are overwritten
'''
nvisits = len(self.order)
        # These could be pre-allocated if agentsNewTime didn't use negative indexing
# Same initialization as in __init__
self.time = [0.]
self.m = 1
# Agent 0 makes visit 0
self.lastat = [ [0]+[None]*(self.nagents-1) ]
for i in xrange(1,nvisits):
agent = self.visit2agent[i]
t = self.agentsNewTime(agent)
self.time.append(t)
self.m += 1
# Everyone has same position except for agent
self.lastat.append(list(self.lastat[-1]))
self.lastat[-1][agent] = i
self.children = []
self.value = self.time[-1]
return self.value
def getVisits(dists,order,nagents):
'''
dists: a distance matrix
order: the order in which nodes must be visited
duplicates allowed
nagents: the number of agents available to make the visits
returns visits,time
visits[i] = j means the ith visit should be performed by agent j
    time[i] is the number of meters a person could have walked since the start when visit i is made
'''
root = OTSPstate(dists,order,nagents)
LO = MAX_BRANCHES // nagents
state,value = branch_bound.branch_bound(root, LO , LO*nagents)
return state.visit2agent,state.time
if __name__=='__main__':
import geometry
# pts = np.array([[0,0],\
# [0,1],\
# [0,5]])
# order = [0,2,1]
# pts = np.array([[0,0],\
# [0,1],\
# [0,2],\
# [0,3],\
# [0,5]])
# order = [0,4,1,2,3]
pts = np.array([[0,0],\
[3,0],\
[4,0],\
[7,0]])
order = [0,2,1,3]
visit2agent = [0,0,0,0]
d = geometry.planeDist(pts,pts)
# print getVisits(d,order,2)
state = OTSPstate(d,order,2,visit2agent)
print state.calcTimes()
|
encolpe/maxfield
|
lib/orderedTSP.py
|
Python
|
gpl-3.0
| 5,855
|
[
"VisIt"
] |
d5d1fea3aca3a8e1acebb6bab3247ab06a6fc8b782c489a69596f5f5b2707223
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import passlib.utils
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Hashable, Iterable, Mapping, OrderedDict
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase, etree._Entity)
# Configure default global parser
etree.set_default_parser(etree.XMLParser(resolve_entities=False))
#----------------------------------------------------------
# Subprocesses
#----------------------------------------------------------
def find_in_path(name):
path = os.environ.get('PATH', os.defpath).split(os.pathsep)
if config.get('bin_path') and config['bin_path'] != 'None':
path.append(config['bin_path'])
return which(name, path=os.pathsep.join(path))
def _exec_pipe(prog, args, env=None):
cmd = (prog,) + args
# on win32, passing close_fds=True is not compatible
# with redirecting std[in/err/out]
close_fds = os.name=="posix"
pop = subprocess.Popen(cmd, bufsize=-1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=close_fds, env=env)
return pop.stdin, pop.stdout
def exec_command_pipe(name, *args):
prog = find_in_path(name)
if not prog:
raise Exception('Command `%s` not found.' % name)
return _exec_pipe(prog, args)
#----------------------------------------------------------
# Postgres subprocesses
#----------------------------------------------------------
def find_pg_tool(name):
path = None
if config['pg_path'] and config['pg_path'] != 'None':
path = config['pg_path']
try:
return which(name, path=path)
except IOError:
raise Exception('Command `%s` not found.' % name)
def exec_pg_environ():
"""
Force the database PostgreSQL environment variables to the database
configuration of Odoo.
Note: On systems where pg_restore/pg_dump require an explicit password
(i.e. on Windows where TCP sockets are used), it is necessary to pass the
postgres user password in the PGPASSWORD environment variable or in a
special .pgpass file.
See also http://www.postgresql.org/docs/8.4/static/libpq-envars.html
"""
env = os.environ.copy()
if openerp.tools.config['db_host']:
env['PGHOST'] = openerp.tools.config['db_host']
if openerp.tools.config['db_port']:
env['PGPORT'] = str(openerp.tools.config['db_port'])
if openerp.tools.config['db_user']:
env['PGUSER'] = openerp.tools.config['db_user']
if openerp.tools.config['db_password']:
env['PGPASSWORD'] = openerp.tools.config['db_password']
return env
def exec_pg_command(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
with open(os.devnull) as dn:
args2 = (prog,) + args
rc = subprocess.call(args2, env=env, stdout=dn, stderr=subprocess.STDOUT)
if rc:
raise Exception('Postgres subprocess %s error %s' % (args2, rc))
def exec_pg_command_pipe(name, *args):
prog = find_pg_tool(name)
env = exec_pg_environ()
return _exec_pipe(prog, args, env)
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
"""Open a file from the OpenERP root, using a subdir folder.
Example::
    >>> file_open('hr/report/timesheet.xsl')
>>> file_open('addons/hr/report/timesheet.xsl')
>>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
@param name name of the file
@param mode file open mode
@param subdir subdirectory
@param pathinfo if True returns tuple (fileobject, filepath)
@return fileobject if pathinfo is False else (fileobject, filepath)
"""
import openerp.modules as addons
adps = addons.module.ad_paths
rtp = os.path.normcase(os.path.abspath(config['root_path']))
basename = name
if os.path.isabs(name):
# It is an absolute path
# Is it below 'addons_path' or 'root_path'?
name = os.path.normcase(os.path.normpath(name))
for root in adps + [rtp]:
root = os.path.normcase(os.path.normpath(root)) + os.sep
if name.startswith(root):
base = root.rstrip(os.sep)
name = name[len(base) + 1:]
break
else:
# It is outside the OpenERP root: skip zipfile lookup.
base, name = os.path.split(name)
return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
elif subdir:
name = os.path.join(subdir, name)
if name.replace(os.sep, '/').startswith('addons/'):
subdir = 'addons'
name2 = name[7:]
else:
name2 = name
# First, try to locate in addons_path
if subdir:
for adp in adps:
try:
return _fileopen(name2, mode=mode, basedir=adp,
pathinfo=pathinfo, basename=basename)
except IOError:
pass
# Second, try to locate in root_path
return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
name = os.path.normpath(os.path.join(basedir, path))
if basename is None:
basename = name
# Give higher priority to module directories, which is
# a more common case than zipped modules.
if os.path.isfile(name):
fo = open(name, mode)
if pathinfo:
return fo, name
return fo
# Support for loading modules in zipped form.
# This will not work for zipped modules that are sitting
# outside of known addons paths.
head = os.path.normpath(path)
zipname = False
while os.sep in head:
head, tail = os.path.split(head)
if not tail:
break
if zipname:
zipname = os.path.join(tail, zipname)
else:
zipname = tail
zpath = os.path.join(basedir, head + '.zip')
if zipfile.is_zipfile(zpath):
from cStringIO import StringIO
zfile = zipfile.ZipFile(zpath)
try:
fo = StringIO()
fo.write(zfile.read(os.path.join(
os.path.basename(head), zipname).replace(
os.sep, '/')))
fo.seek(0)
if pathinfo:
return fo, name
return fo
except Exception:
pass
# Not found
if name.endswith('.rml'):
raise IOError('Report %r doesn\'t exist or deleted' % basename)
raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
"""Flatten a list of elements into a uniqu list
Author: Christophe Simonis (christophe@tinyerp.com)
Examples::
>>> flatten(['a'])
['a']
>>> flatten('b')
['b']
>>> flatten( [] )
[]
>>> flatten( [[], [[]]] )
[]
>>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
['a', 'b', 'c', 'd', 'e', 'f']
>>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
>>> flatten(t)
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def isiterable(x):
return hasattr(x, "__iter__")
r = []
for e in list:
if isiterable(e):
map(r.append, flatten(e))
else:
r.append(e)
return r
def reverse_enumerate(l):
"""Like enumerate but in the other sens
Usage::
>>> a = ['a', 'b', 'c']
>>> it = reverse_enumerate(a)
>>> it.next()
(2, 'c')
>>> it.next()
(1, 'b')
>>> it.next()
(0, 'a')
>>> it.next()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
StopIteration
"""
return izip(xrange(len(l)-1, -1, -1), reversed(l))
def topological_sort(elems):
""" Return a list of elements sorted so that their dependencies are listed
before them in the result.
:param elems: specifies the elements to sort with their dependencies; it is
a dictionary like `{element: dependencies}` where `dependencies` is a
collection of elements that must appear before `element`. The elements
of `dependencies` are not required to appear in `elems`; they will
simply not appear in the result.
:returns: a list with the keys of `elems` sorted according to their
specification.
"""
# the algorithm is inspired by [Tarjan 1976],
# http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
result = []
visited = set()
def visit(n):
if n not in visited:
visited.add(n)
if n in elems:
# first visit all dependencies of n, then append n to result
map(visit, elems[n])
result.append(n)
map(visit, elems)
return result
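# Illustrative sketch (an addition, not original Odoo code): dependencies
# come out before the elements that require them.
def _example_topological_sort():
    order = topological_sort({'app': ['base'], 'base': []})
    assert order.index('base') < order.index('app')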
class UpdateableStr(local):
""" Class that stores an updateable string (used in wizards)
"""
def __init__(self, string=''):
self.string = string
def __str__(self):
return str(self.string)
def __repr__(self):
return str(self.string)
def __nonzero__(self):
return bool(self.string)
class UpdateableDict(local):
"""Stores an updateable dict to use in wizards
"""
def __init__(self, dict=None):
if dict is None:
dict = {}
self.dict = dict
def __str__(self):
return str(self.dict)
def __repr__(self):
return str(self.dict)
def clear(self):
return self.dict.clear()
def keys(self):
return self.dict.keys()
def __setitem__(self, i, y):
self.dict.__setitem__(i, y)
def __getitem__(self, i):
return self.dict.__getitem__(i)
def copy(self):
return self.dict.copy()
def iteritems(self):
return self.dict.iteritems()
def iterkeys(self):
return self.dict.iterkeys()
def itervalues(self):
return self.dict.itervalues()
def pop(self, k, d=None):
return self.dict.pop(k, d)
def popitem(self):
return self.dict.popitem()
def setdefault(self, k, d=None):
return self.dict.setdefault(k, d)
def update(self, E, **F):
        return self.dict.update(E, **F)
def values(self):
return self.dict.values()
def get(self, k, d=None):
return self.dict.get(k, d)
def has_key(self, k):
return self.dict.has_key(k)
def items(self):
return self.dict.items()
def __cmp__(self, y):
return self.dict.__cmp__(y)
def __contains__(self, k):
return self.dict.__contains__(k)
def __delitem__(self, y):
return self.dict.__delitem__(y)
def __eq__(self, y):
return self.dict.__eq__(y)
def __ge__(self, y):
return self.dict.__ge__(y)
def __gt__(self, y):
return self.dict.__gt__(y)
def __hash__(self):
return self.dict.__hash__()
def __iter__(self):
return self.dict.__iter__()
def __le__(self, y):
return self.dict.__le__(y)
def __len__(self):
return self.dict.__len__()
def __lt__(self, y):
return self.dict.__lt__(y)
def __ne__(self, y):
return self.dict.__ne__(y)
def to_xml(s):
return s.replace('&','&').replace('<','<').replace('>','>')
def get_iso_codes(lang):
if lang.find('_') != -1:
if lang.split('_')[0] == lang.split('_')[1].lower():
lang = lang.split('_')[0]
return lang
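# Illustrative sketch (an addition, not original Odoo code): the territory
# suffix is dropped only when it merely repeats the language code.
def _example_get_iso_codes():
    assert get_iso_codes('fr_FR') == 'fr'
    assert get_iso_codes('fr_BE') == 'fr_BE'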
ALL_LANGUAGES = {
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'el_GR': u'Greek / Ελληνικά',
'en_AU': u'English (AU)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'eu_ES': u'Basque / Euskara',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'ja_JP': u'Japanese / 日本語',
'ka_GE': u'Georgian / ქართული ენა',
'kab_DZ': u'Kabyle / Taqbaylit',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Dutch (BE) / Nederlands (BE)',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
}
def scan_languages():
""" Returns all languages supported by OpenERP for translation
:returns: a list of (lang_code, lang_name) pairs
:rtype: [(str, unicode)]
"""
return sorted(ALL_LANGUAGES.iteritems(), key=lambda k: k[1])
def get_user_companies(cr, user):
def _get_company_children(cr, ids):
if not ids:
return []
cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
res = [x[0] for x in cr.fetchall()]
res.extend(_get_company_children(cr, res))
return res
cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
user_comp = cr.fetchone()[0]
if not user_comp:
return []
return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
"""
Input number : account or invoice number
Output return: the same number completed with the recursive mod10
key
"""
codec=[0,9,4,6,8,2,7,1,3,5]
report = 0
result=""
for digit in number:
result += digit
if digit.isdigit():
report = codec[ (int(digit) + report) % 10 ]
return result + str((10 - report) % 10)
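# Illustrative sketch (an addition, not original Odoo code): the recursive
# mod10 check digit is appended to the input; non-digit characters are kept
# in the output but ignored by the checksum.
def _example_mod10r():
    assert mod10r('12') == '121'
    assert mod10r('1-2') == '1-21'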
def human_size(sz):
"""
Return the size in a human readable format
"""
if not sz:
return False
units = ('bytes', 'Kb', 'Mb', 'Gb')
if isinstance(sz,basestring):
sz=len(sz)
s, i = float(sz), 0
while s >= 1024 and i < len(units)-1:
s /= 1024
i += 1
return "%0.2f %s" % (s, units[i])
def logged(f):
@wraps(f)
def wrapper(*args, **kwargs):
from pprint import pformat
vector = ['Call -> function: %r' % f]
for i, arg in enumerate(args):
vector.append(' arg %02d: %s' % (i, pformat(arg)))
for key, value in kwargs.items():
vector.append(' kwarg %10s: %s' % (key, pformat(value)))
timeb4 = time.time()
res = f(*args, **kwargs)
vector.append(' result: %s' % pformat(res))
vector.append(' time delta: %s' % (time.time() - timeb4))
_logger.debug('\n'.join(vector))
return res
return wrapper
class profile(object):
def __init__(self, fname=None):
self.fname = fname
def __call__(self, f):
@wraps(f)
def wrapper(*args, **kwargs):
profile = cProfile.Profile()
result = profile.runcall(f, *args, **kwargs)
profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
return result
return wrapper
def detect_ip_addr():
"""Try a very crude method to figure out a valid external
IP or hostname for the current machine. Don't rely on this
for binding to an interface, but it could be used as basis
for constructing a remote URL to the server.
"""
def _detect_ip_addr():
from array import array
from struct import pack, unpack
try:
import fcntl
except ImportError:
fcntl = None
ip_addr = None
if not fcntl: # not UNIX:
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
else: # UNIX:
# get all interfaces:
nbytes = 128 * 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
names = array('B', '\0' * nbytes)
#print 'names: ', names
outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
namestr = names.tostring()
# try 64 bit kernel:
for i in range(0, outbytes, 40):
name = namestr[i:i+16].split('\0', 1)[0]
if name != 'lo':
ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
break
# try 32 bit kernel:
if ip_addr is None:
ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
for ifname in [iface for iface in ifaces if iface != 'lo']:
ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
break
return ip_addr or 'localhost'
try:
ip_addr = _detect_ip_addr()
except Exception:
ip_addr = 'localhost'
return ip_addr
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
DATETIME_FORMATS_MAP = {
'%C': '', # century
'%D': '%m/%d/%Y', # modified %y->%Y
'%e': '%d',
'%E': '', # special modifier
'%F': '%Y-%m-%d',
'%g': '%Y', # modified %y->%Y
'%G': '%Y',
'%h': '%b',
'%k': '%H',
'%l': '%I',
'%n': '\n',
'%O': '', # special modifier
'%P': '%p',
'%R': '%H:%M',
'%r': '%I:%M:%S %p',
'%s': '', #num of seconds since epoch
'%T': '%H:%M:%S',
'%t': ' ', # tab
'%u': ' %w',
'%V': '%W',
'%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
'%+': '%Y-%m-%d %H:%M:%S',
# %Z is a special case that causes 2 problems at least:
# - the timezone names we use (in res_user.context_tz) come
# from pytz, but not all these names are recognized by
# strptime(), so we cannot convert in both directions
# when such a timezone is selected and %Z is in the format
# - %Z is replaced by an empty string in strftime() when
# there is no tzinfo in a datetime value (e.g. when the user
# did not pick a context_tz). The resulting string does not
# parse back if the format requires %Z.
# As a consequence, we strip it completely from format strings.
# The user can always have a look at the context_tz in
# preferences to check the timezone.
'%z': '',
'%Z': '',
}
POSIX_TO_LDML = {
'a': 'E',
'A': 'EEEE',
'b': 'MMM',
'B': 'MMMM',
#'c': '',
'd': 'dd',
'H': 'HH',
'I': 'hh',
'j': 'DDD',
'm': 'MM',
'M': 'mm',
'p': 'a',
'S': 'ss',
'U': 'w',
'w': 'e',
'W': 'w',
'y': 'yy',
'Y': 'yyyy',
# see comments above; babel's format_datetime assumes a UTC timezone
# for naive datetime objects
#'z': 'Z',
#'Z': 'z',
}
def posix_to_ldml(fmt, locale):
""" Converts a posix/strftime pattern into an LDML date format pattern.
:param fmt: non-extended C89/C90 strftime pattern
:param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
:return: unicode
"""
buf = []
pc = False
quoted = []
for c in fmt:
        # LDML date format patterns use letters, so letters must be quoted
if not pc and c.isalpha():
quoted.append(c if c != "'" else "''")
continue
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
quoted = []
if pc:
if c == '%': # escaped percent
buf.append('%')
elif c == 'x': # date format, short seems to match
buf.append(locale.date_formats['short'].pattern)
elif c == 'X': # time format, seems to include seconds. short does not
buf.append(locale.time_formats['medium'].pattern)
else: # look up format char in static mapping
buf.append(POSIX_TO_LDML[c])
pc = False
elif c == '%':
pc = True
else:
buf.append(c)
# flush anything remaining in quoted buffer
if quoted:
buf.append("'")
buf.append(''.join(quoted))
buf.append("'")
return ''.join(buf)
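# Illustrative sketch (an addition, not original Odoo code; assumes babel,
# which Odoo already depends on, is importable): directives map via
# POSIX_TO_LDML, and literal letters get quoted so LDML does not treat
# them as pattern characters.
def _example_posix_to_ldml():
    from babel import Locale
    locale = Locale('en')
    assert posix_to_ldml('%Y-%m-%d', locale) == 'yyyy-MM-dd'
    assert posix_to_ldml('at %H:%M', locale) == "'at' HH:mm"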
def split_every(n, iterable, piece_maker=tuple):
"""Splits an iterable into length-n pieces. The last piece will be shorter
if ``n`` does not evenly divide the iterable length.
@param ``piece_maker``: function to build the pieces
from the slices (tuple,list,...)
"""
iterator = iter(iterable)
piece = piece_maker(islice(iterator, n))
while piece:
yield piece
piece = piece_maker(islice(iterator, n))
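# Illustrative sketch (an addition, not original Odoo code): the final
# piece is shorter when n does not divide the iterable's length evenly.
def _example_split_every():
    assert list(split_every(2, range(5))) == [(0, 1), (2, 3), (4,)]
    assert list(split_every(2, 'abcd', piece_maker=list)) == [['a', 'b'], ['c', 'd']]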
if __name__ == '__main__':
import doctest
doctest.testmod()
class upload_data_thread(threading.Thread):
def __init__(self, email, data, type):
self.args = [('email',email),('type',type),('data',data)]
super(upload_data_thread,self).__init__()
def run(self):
try:
import urllib
args = urllib.urlencode(self.args)
fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
fp.read()
fp.close()
except Exception:
pass
def upload_data(email, data, type='SURVEY'):
a = upload_data_thread(email, data, type)
a.start()
return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
""" Read the values of ``field´´ for the given ``ids´´ and group ids by value.
:param string field: name of the field we want to read and group by
:return: mapping of field values to the list of ids that have it
:rtype: dict
"""
res = {}
for record in obj.read(cr, uid, ids, [field], context=context):
key = record[field]
res.setdefault(key[0] if isinstance(key, tuple) else key, []).append(record['id'])
return res
def get_and_group_by_company(cr, uid, obj, ids, context=None):
return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
if len(items) == 1:
attr = items[0]
def g(obj):
return resolve_attr(obj, attr)
else:
def g(obj):
return tuple(resolve_attr(obj, attr) for attr in items)
return g
class unquote(str):
"""A subclass of str that implements repr() without enclosing quotation marks
    or escaping, keeping the original string untouched. The name comes from Lisp's unquote.
One of the uses for this is to preserve or insert bare variable names within dicts during eval()
of a dict's repr(). Use with care.
    Some examples (notice that there are never quotes surrounding
    the ``active_id`` name):
>>> unquote('active_id')
active_id
>>> d = {'test': unquote('active_id')}
>>> d
{'test': active_id}
>>> print d
{'test': active_id}
"""
def __repr__(self):
return self
class UnquoteEvalContext(defaultdict):
"""Defaultdict-based evaluation context that returns
an ``unquote`` string for any missing name used during
the evaluation.
Mostly useful for evaluating OpenERP domains/contexts that
may refer to names that are unknown at the time of eval,
so that when the context/domain is converted back to a string,
the original names are preserved.
**Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
``safe_eval()`` will shadow the builtins, which may cause other
failures, depending on what is evaluated.
Example (notice that ``section_id`` is preserved in the final
result) :
>>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
>>> eval(context_str, UnquoteEvalContext(uid=1))
{'default_user_id': 1, 'default_section_id': section_id}
"""
def __init__(self, *args, **kwargs):
super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
def __missing__(self, key):
return unquote(key)
class mute_logger(object):
"""Temporary suppress the logging.
Can be used as context manager or decorator.
@mute_logger('openerp.plic.ploc')
def do_stuff():
blahblah()
with mute_logger('openerp.foo.bar'):
do_suff()
"""
def __init__(self, *loggers):
self.loggers = loggers
def filter(self, record):
return 0
def __enter__(self):
for logger in self.loggers:
assert isinstance(logger, basestring),\
"A logger name must be a string, got %s" % type(logger)
logging.getLogger(logger).addFilter(self)
def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
for logger in self.loggers:
logging.getLogger(logger).removeFilter(self)
def __call__(self, func):
@wraps(func)
def deco(*args, **kwargs):
with self:
return func(*args, **kwargs)
return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
def stripped_sys_argv(*strip_args):
"""Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
strip_args = sorted(set(strip_args) | set(['-s', '--save', '-u', '--update', '-i', '--init']))
assert all(config.parser.has_option(s) for s in strip_args)
takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
longs_eq = tuple(l + '=' for l in longs if takes_value[l])
args = sys.argv[:]
def strip(args, i):
return args[i].startswith(shorts) \
or args[i].startswith(longs_eq) or (args[i] in longs) \
or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
"""
An immutable mapping returning the provided value for every single key.
Useful for default value to methods
"""
__slots__ = ['_value']
def __init__(self, val):
self._value = val
def __len__(self):
"""
defaultdict updates its length for each individually requested key, is
that really useful?
"""
return 0
def __iter__(self):
"""
        same as len, defaultdict updates its iterable keyset with each key
requested, is there a point for this?
"""
return iter([])
def __getitem__(self, item):
return self._value
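# Illustrative sketch (an addition, not original Odoo code): every lookup
# returns the same value, while iteration and length report an empty mapping.
def _example_constant_mapping():
    cm = ConstantMapping(42)
    assert cm['anything'] == 42
    assert len(cm) == 0 and list(cm) == []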
def dumpstacks(sig=None, frame=None):
""" Signal handler: dump a stack trace for each existing thread."""
code = []
def extract_stack(stack):
for filename, lineno, name, line in traceback.extract_stack(stack):
yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
if line:
yield " %s" % (line.strip(),)
# code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
# modified for python 2.5 compatibility
threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
for th in threading.enumerate()])
for threadId, stack in sys._current_frames().items():
thread_info = threads_info.get(threadId)
code.append("\n# Thread: %s (id:%s) (uid:%s)" %
(thread_info and thread_info['name'] or 'n/a',
threadId,
thread_info and thread_info['uid'] or 'n/a'))
for line in extract_stack(stack):
code.append(line)
if openerp.evented:
# code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
import gc
from greenlet import greenlet
for ob in gc.get_objects():
if not isinstance(ob, greenlet) or not ob:
continue
code.append("\n# Greenlet: %r" % (ob,))
for line in extract_stack(ob.gr_frame):
code.append(line)
_logger.info("\n".join(code))
def freehash(arg):
try:
return hash(arg)
except Exception:
if isinstance(arg, Mapping):
return hash(frozendict(arg))
elif isinstance(arg, Iterable):
return hash(frozenset(map(freehash, arg)))
else:
return id(arg)
class frozendict(dict):
""" An implementation of an immutable dictionary. """
def __delitem__(self, key):
raise NotImplementedError("'__delitem__' not supported on frozendict")
def __setitem__(self, key, val):
raise NotImplementedError("'__setitem__' not supported on frozendict")
def clear(self):
raise NotImplementedError("'clear' not supported on frozendict")
def pop(self, key, default=None):
raise NotImplementedError("'pop' not supported on frozendict")
def popitem(self):
raise NotImplementedError("'popitem' not supported on frozendict")
def setdefault(self, key, default=None):
raise NotImplementedError("'setdefault' not supported on frozendict")
def update(self, *args, **kwargs):
raise NotImplementedError("'update' not supported on frozendict")
def __hash__(self):
return hash(frozenset((key, freehash(val)) for key, val in self.iteritems()))
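# Illustrative sketch (an addition, not original Odoo code): equal
# frozendicts hash equally, so they can be used as dictionary keys, while
# mutation raises NotImplementedError.
def _example_frozendict():
    fd = frozendict(a=1)
    assert {fd: 'x'}[frozendict(a=1)] == 'x'
    try:
        fd['b'] = 2
        assert False, 'frozendict must refuse mutation'
    except NotImplementedError:
        pass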
class Collector(Mapping):
""" A mapping from keys to lists. This is essentially a space optimization
for ``defaultdict(list)``.
"""
__slots__ = ['_map']
def __init__(self):
self._map = {}
def add(self, key, val):
vals = self._map.setdefault(key, [])
if val not in vals:
vals.append(val)
def __getitem__(self, key):
return self._map.get(key, ())
def __iter__(self):
return iter(self._map)
def __len__(self):
return len(self._map)
class OrderedSet(OrderedDict):
""" A simple collection that remembers the elements insertion order. """
def __init__(self, seq=()):
super(OrderedSet, self).__init__([(x, None) for x in seq])
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
@contextmanager
def ignore(*exc):
try:
yield
except exc:
pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
def html_escape(text):
return werkzeug.utils.escape(text, quote=True)
else:
def html_escape(text):
return werkzeug.utils.escape(text)
def formatLang(env, value, digits=None, grouping=True, monetary=False, dp=False, currency_obj=False):
"""
Assuming 'Account' decimal.precision=3:
formatLang(value) -> digits=2 (default)
formatLang(value, digits=4) -> digits=4
formatLang(value, dp='Account') -> digits=3
formatLang(value, digits=5, dp='Account') -> digits=5
"""
if digits is None:
digits = DEFAULT_DIGITS = 2
if dp:
decimal_precision_obj = env['decimal.precision']
digits = decimal_precision_obj.precision_get(dp)
elif (hasattr(value, '_field') and isinstance(value._field, (float_field, function_field)) and value._field.digits):
digits = value._field.digits[1]
    if not digits and digits != 0:
digits = DEFAULT_DIGITS
if isinstance(value, (str, unicode)) and not value:
return ''
lang = env.user.company_id.partner_id.lang or 'en_US'
lang_objs = env['res.lang'].search([('code', '=', lang)])
if not lang_objs:
lang_objs = env['res.lang'].search([('code', '=', 'en_US')])
lang_obj = lang_objs[0]
res = lang_obj.format('%.' + str(digits) + 'f', value, grouping=grouping, monetary=monetary)
if currency_obj:
if currency_obj.position == 'after':
res = '%s %s' % (res, currency_obj.symbol)
elif currency_obj and currency_obj.position == 'before':
res = '%s %s' % (currency_obj.symbol, res)
return res
def _consteq(str1, str2):
""" Constant-time string comparison. Suitable to compare bytestrings of fixed,
known length only, because length difference is optimized. """
return len(str1) == len(str2) and sum(ord(x)^ord(y) for x, y in zip(str1, str2)) == 0
consteq = getattr(passlib.utils, 'consteq', _consteq)
|
QinerTech/QinerApps
|
openerp/tools/misc.py
|
Python
|
gpl-3.0
| 38,349
|
[
"VisIt"
] |
a3816179c2f10ecab6cd3dacb593410597fb833a6aa8540505c7b410ec293d5d
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "People having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
"""People with sources"""
name = _('People with <count> sources')
description = _("Matches people with a certain number of sources connected to it")
|
arunkgupta/gramps
|
gramps/gen/filters/rules/person/_hassourcecount.py
|
Python
|
gpl-2.0
| 1,763
|
[
"Brian"
] |
298a8e4e73ea29dd5c9977a9a9438b5ae60fa5cbabe920543d48897c8279b187
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=======================================================
Inbox size limiting Pipelines, Graphlines and Carousels
=======================================================
Extended versions of Kamaelia.Chassis.Pipeline, Kamaelia.Chassis.Graphline and
Kamaelia.Chassis.Carousel that add the ability to specify size limits for
inboxes of components.
Example Usages
--------------
A pipeline with inbox size limits on 3 of the components' "inbox" inboxes::
Pipeline( 5, MyComponent(), # 'inbox' inbox limited to 5 items
2, MyComponent(), # 'inbox' inbox limited to 2 items
MyComponent(), # 'inbox' inbox unlimited
28, MyComponent() # 'inbox' inbox limited to 28 items
)
A graphline where component 'A' has a size limit of 5 on its "inbox" inbox; and
component 'C' has a size limit of 17 on its "control" inbox::
Graphline( A = MyComponent(),
B = MyComponent(),
C = MyComponent(),
linkages = { ... },
boxsizes = {
("A","inbox") : 5,
("C","control") : 17
}
)
A Carousel, where the child component will have a size limit of 5 on its "inbox"
inbox::
Carousel( MyComponent(), boxsize=5 )
Decoding a Dirac video file and saving each frame in a separate file::
Pipeline(
RateControlledFileReader("video.dirac", ... ),
DiracDecoder(),
TagWithSequenceNumber(),
InboxControlledCarousel(
lambda (seqnum, frame) :
Pipeline( OneShot(frame),
FrameToYUV4MPEG(),
SimpleFileWriter("%08d.yuv4mpeg" % seqnum),
)
),
)
More details
------------
The behaviour of these three components/prefabs is identical to their original
counterparts (Kamaelia.Chassis.Pipeline, Kamaelia.Chassis.Graphline and
Kamaelia.Chassis.Carousel).
*For Pipelines*, if you want to size limit the "inbox" inbox of a particular
component in the pipeline, then put the size limit as an integer before it.
Any component without an integer before it is left with the default of an
unlimited "inbox" inbox.
The behaviour thus reduces to that of the normal Pipeline component.
*For Graphlines*, if you want to size limit particular inboxes, supply the
"boxsizes" argument with a dictionary that maps (componentName, boxName) keys
to the size limit for that box.
Again, if you don't specify a "boxsizes" argument, then behaviour is identical
to that of the normal Graphline component.
*For Carousels*, if you want a size limit on the "inbox" inbox of the child
component (created by the factory function), then specify it using the
"boxsizes" argument.
Again, if you don't specify a "boxsizes" argument, then behaviour is identical
to that of the normal Carousel component.
*InboxControlledCarousel* behaves identically to Carousel.
The "inbox" inbox is equivalent to the "next" inbox of Carousel.
The "data_inbox" inbox is equivalent to the "inbox" inbox of Carousel.
"""
from Kamaelia.Chassis.Pipeline import Pipeline as _Pipeline
def Pipeline(*components):
"""\
Pipeline(\*components) -> new Pipeline component.
Encapsulates the specified set of components and wires them up in a chain
(a Pipeline) in the order you provided them.
Keyword arguments:
- components -- the components you want, in the order you want them wired up.
Any Integers set the "inbox" inbox size limit for the component that follows them.
"""
truecomponents = []
boxsize=False
for item in components:
if isinstance(item,int):
boxsize=item
elif item is None:
boxsize=item
else:
component=item
if boxsize != False:
component.inboxes['inbox'].setSize(boxsize)
boxsize=False
truecomponents.append(component)
return _Pipeline(*truecomponents)
from Kamaelia.Chassis.Graphline import Graphline as _Graphline
def Graphline(linkages = None, boxsizes = None,**components):
"""\
Graphline([linkages][,boxsizes],\*\*components) -> new Graphline component
Encapsulates the specified set of components and wires them up with the
specified linkages.
Keyword arguments:
- linkages -- dictionary mapping ("componentname","boxname") to ("componentname","boxname")
- boxsizes -- dictionary mapping ("componentname","boxname") to size limit for inbox
- components -- dictionary mapping names to component instances (default is nothing)
"""
g = _Graphline(linkages,**components)
if boxsizes is not None:
for ((componentname,boxname),size) in boxsizes.items():
components[componentname].inboxes[boxname].setSize(size)
return g
from Kamaelia.Chassis.Carousel import Carousel as _Carousel
def Carousel(componentFactory, make1stRequest=False, boxsize=None):
"""\
Carousel(componentFactory[,make1stRequest][,boxSize]) -> new Carousel component
Create a Carousel component that makes child components one at a time (in
carousel fashion) using the supplied factory function.
Keyword arguments:
- componentFactory -- function that takes a single argument and returns a component
- make1stRequest -- if True, Carousel will send an initial "NEXT" request. (default=False)
- boxsize -- size limit for "inbox" inbox of the created child component
"""
if boxsize is not None:
def setBoxSize(component):
component.inboxes['inbox'].setSize(boxsize)
return component
newComponentFactory = lambda meta : setBoxSize(componentFactory(meta))
else:
newComponentFactory = componentFactory
return _Carousel(newComponentFactory, make1stRequest)
def InboxControlledCarousel(*argv, **argd):
"""\
InboxControlledCarousel(componentFactory[,make1stRequest][,boxSize]) -> new Carousel component
Create an InboxControlledCarousel component that makes child components one at a time (in
carousel fashion) using the supplied factory function.
Keyword arguments:
- componentFactory -- function that takes a single argument and returns a component
- make1stRequest -- if True, Carousel will send an initial "NEXT" request. (default=False)
- boxsize -- size limit for "inbox" inbox of the created child component
"""
return Graphline( CAROUSEL = Carousel( *argv, **argd ),
linkages = {
("", "inbox") : ("CAROUSEL", "next"),
("", "data_inbox") : ("CAROUSEL", "inbox"),
("", "control") : ("CAROUSEL", "control"),
("CAROUSEL", "outbox") : ("", "outbox"),
("CAROUSEL", "signal") : ("", "signal"),
}
)
__kamaelia_prefabs__ = ( Pipeline, Graphline, Carousel, InboxControlledCarousel, )
|
sparkslabs/kamaelia
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Experimental/Chassis.py
|
Python
|
apache-2.0
| 8,048
|
[
"DIRAC"
] |
fb3d7b8a9b87aad8f01353c098cbd2082fcf85e10309739433e583f8a3fea047
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import unittest, numpy as np
from pyscf import gto, scf
from pyscf.nao import nao, scf as scf_nao, conv_yzx2xyz_c
mol = gto.M( verbose = 1,
atom = '''
H 0 0 0
H 0 0.757 0.587''', basis = 'cc-pvdz',)
conv = conv_yzx2xyz_c(mol)
gto_hf = scf.RHF(mol)
gto_hf.kernel()
rdm1 = conv.conv_yzx2xyz_2d(gto_hf.make_rdm1())
class KnowValues(unittest.TestCase):
def test_kmat_gto_vs_nao(self):
""" Test computation of Fock exchange between NAOs against this computed between GTOs"""
vh_gto,k_gto = gto_hf.get_jk()
k_gto = conv.conv_yzx2xyz_2d(k_gto)
mf = scf_nao(mf=gto_hf, gto=mol)
k_nao = mf.get_k(dm=rdm1)
self.assertTrue(abs(k_nao-k_gto).sum()/k_gto.size<2.5e-5)
def test_overlap_gto_vs_nao(self):
""" Test computation of overlaps between NAOs against overlaps computed between GTOs"""
from pyscf.nao.m_overlap_am import overlap_am
oref = conv.conv_yzx2xyz_2d(mol.intor_symmetric('cint1e_ovlp_sph'))
sv = nao(gto=mol)
over = sv.overlap_coo(funct=overlap_am).toarray()
self.assertTrue(abs(over-oref).sum()<5e-9)
def test_laplace_gto_vs_nao(self):
""" Test computation of kinetic energy between NAOs against those computed between GTOs"""
from pyscf.nao.m_laplace_am import laplace_am
tref = conv.conv_yzx2xyz_2d(mol.intor_symmetric('int1e_kin'))
sv = nao(gto=mol)
tkin = (-0.5*sv.overlap_coo(funct=laplace_am)).toarray()
self.assertTrue(abs(tref-tkin).sum()/len(tkin)<5e-9)
def test_vhartree_gto_vs_nao(self):
""" Test computation of Hartree potential between NAOs against this computed between GTOs"""
vh_gto = conv.conv_yzx2xyz_2d(gto_hf.get_j())
scf = scf_nao(mf=gto_hf, gto=mol)
vh_nao = scf.vhartree_coo(dm=rdm1)
self.assertTrue(abs(vh_nao-vh_gto).sum()/vh_gto.size<1e-5)
def test_vne_gto_vs_nao(self):
""" Test computation of matrix elements of nuclear-electron attraction """
vne = mol.intor_symmetric('int1e_nuc')
vne_gto = conv.conv_yzx2xyz_2d(vne)
sv = nao(gto=mol)
vne_nao = sv.vnucele_coo_coulomb(level=1)
#print('a,b,c', (vne_nao).sum(), (vne_gto).sum(), abs(vne_nao-vne_gto).sum()/vne_gto.size)
self.assertTrue(abs(vne_nao-vne_gto).sum()/vne_gto.size<5e-6)
def test_energy_nuc_gto_vs_nao(self):
""" Test computation of matrix elements of nuclear-electron attraction """
sv = nao(gto=mol)
e_nao = sv.energy_nuc()
e_gto = mol.energy_nuc()
self.assertAlmostEqual(e_nao, e_gto)
if __name__ == "__main__": unittest.main()
|
gkc1000/pyscf
|
pyscf/nao/test/test_0043_h2_vh_gto_vs_nao_nao.py
|
Python
|
apache-2.0
| 3,194
|
[
"PySCF"
] |
e60067f0e48ef84146b25df9d03768f5666af324f6b45eac73c6777a1ff859c5
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#******************************************************************************
# ZYNTHIAN PROJECT: Zynthian GUI
#
# Zynthian GUI Touchscreen Calibration Class
#
# Copyright (C) 2020 Brian Walton <brian@riban.co.uk>
#
#******************************************************************************
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of
# the License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# For a full copy of the GNU General Public License see the LICENSE.txt file.
#
#******************************************************************************
import tkinter
import logging
import tkinter.font as tkFont
from threading import Timer, Thread
from subprocess import run,PIPE
from datetime import datetime # Only to timestamp config file updates
from evdev import InputDevice, ecodes
from select import select
import os
# Zynthian specific modules
from zyngui import zynthian_gui_config
# Little class to represent x,y coordinates
class point:
x = 0.0
y = 0.0
def __init__(self, x=0, y=0):
self.x = x
self.y = y
#------------------------------------------------------------------------------
# Zynthian Touchscreen Calibration GUI Class
#------------------------------------------------------------------------------
# Class implements zynthian touchscreen calibration
class zynthian_gui_touchscreen_calibration:
# Function to initialise class
def __init__(self):
self.shown = False
self.zyngui=zynthian_gui_config.zyngui
self.height = zynthian_gui_config.display_height
self.width = zynthian_gui_config.display_width
self.debounce = 0.5 * self.height # Clicks cannot be closer than this
# Main Frame
self.main_frame = tkinter.Frame(zynthian_gui_config.top,
width = self.width,
height = self.height,
bg = zynthian_gui_config.color_bg,
cursor="none")
# Canvas
self.canvas = tkinter.Canvas(self.main_frame,
height = self.height,
width = self.width,
bg="black",
bd=0,
highlightthickness=0)
		# Display positions of the calibration targets
self.display_points = [point(self.width * 0.15, self.height * 0.15),
point(self.width * 0.85, self.height * 0.85),
point(self.width * 0.5, self.height * 0.5)]
self.touch_points = [point(), point()] # List of touch point results
# Crosshair target
self.index = 2 # Index of current calibration point (0=NW, 1=SE, 2=CENTRE)
self.crosshair_size = self.width / 20 # half width of cross hairs
self.crosshair_circle = self.canvas.create_oval(
self.display_points[self.index].x - self.crosshair_size * 0.8, self.display_points[self.index].y - self.crosshair_size * 0.8,
self.display_points[self.index].x + self.crosshair_size * 0.8, self.display_points[self.index].y + self.crosshair_size * 0.8,
width=3, outline="white", tags=("crosshairs","crosshairs_circles"))
self.crosshair_inner_circle = self.canvas.create_oval(
self.display_points[self.index].x - self.crosshair_size * 0.2, self.display_points[self.index].y - self.crosshair_size * 0.2,
self.display_points[self.index].x + self.crosshair_size * 0.2, self.display_points[self.index].y + self.crosshair_size * 0.2,
width=3, outline="white", tags=("crosshairs","crosshairs_circles"))
self.crosshair_vertical = self.canvas.create_line(
self.display_points[self.index].x, self.display_points[self.index].y - self.crosshair_size,
			self.display_points[self.index].x, self.display_points[self.index].y + self.crosshair_size,
width=3, fill="white", tags=("crosshairs","crosshairs_lines"))
self.crosshair_horizontal = self.canvas.create_line(
self.display_points[self.index].x - self.crosshair_size, self.display_points[self.index].y,
self.display_points[self.index].x + self.crosshair_size, self.display_points[self.index].y,
width=3, fill="white", tags=("crosshairs","crosshairs_lines"))
self.canvas.pack()
# Countdown timer
self.countdown_text = self.canvas.create_text(self.width / 2,
self.height / 2 - self.crosshair_size - zynthian_gui_config.font_size - 2,
font=(zynthian_gui_config.font_family, zynthian_gui_config.font_size, "normal"),
fill="red")
self.timer = Timer(interval=1, function=self.onTimer)
		self.timeout = 15 # Period in seconds after last touch until screen closes with no change
self.pressed = False # True if screen pressed
# Instruction text
self.instruction_text = self.canvas.create_text(self.width / 2,
self.height / 2 + self.crosshair_size + 2 + zynthian_gui_config.font_size * 2,
font=(zynthian_gui_config.font_family, zynthian_gui_config.font_size, "normal"),
fill="white",
text="Touch crosshairs using a stylus")
self.device_text = self.canvas.create_text(self.width / 2,
self.height - zynthian_gui_config.font_size * 2,
font=(zynthian_gui_config.font_family, zynthian_gui_config.font_size, "normal"),
fill="white")
self.device_name = None # libinput name of selected device
# Run xinput
# args: List of arguments to pass to xinput
# Returns: Output of xinput as string
# Credit: https://github.com/reinderien/xcalibrate
def xinput(self, *args):
try:
return run(args=('/usr/bin/xinput', *args),
stdout=PIPE, check=True,
universal_newlines=True).stdout
except:
return ""
# Thread waiting for first touch to detect touch interface
def detectDevice(self):
# Populate list of absolute x/y devices
devices = []
for filename in os.listdir("/dev/input"):
if filename.startswith("event"):
device = InputDevice("/dev/input/%s" % (filename))
if ecodes.EV_ABS in device.capabilities().keys():
devices.append(device)
# Loop until we get a touch button event or the view hides
running = True
while running and self.shown:
r, w, x = select(devices, [], []) # Wait for any of the devices to trigger an event
for device in r: # Iterate through all devices that have triggered events
for event in device.read(): # Iterate through all events from each device
if event.code == ecodes.BTN_TOUCH:
if event.value:
self.canvas.itemconfig("crosshairs_lines", fill="red")
self.canvas.itemconfig("crosshairs_circles", outline="red")
self.pressed = True
self.countdown = self.timeout
self.setDevice(device.name, device.path)
else:
self.canvas.itemconfig("crosshairs_lines", fill="white")
self.canvas.itemconfig("crosshairs_circles", outline="white")
self.pressed = False
self.countdown = self.timeout
if self.device_name:
self.index = 0
self.drawCross()
self.canvas.bind('<Button-1>', self.onPress)
self.canvas.bind('<ButtonRelease-1>', self.onRelease)
running = False
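	# Editor's note: evdev InputDevice objects expose a fileno(), which is why
	# select() above can block on every touch device at once; the first
	# BTN_TOUCH event then identifies the interface to calibrate.  In
	# miniature (illustrative):
	#
	#   r, _, _ = select(devices, [], [])
	#   for device in r:
	#       for event in device.read():
	#           if event.code == ecodes.BTN_TOUCH and event.value:
	#               chosen = device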
# Set the device to configure
# name: evdev device name
# path: Path to device, e.g. '/dev/input/event0'
# Returns: True on success
def setDevice(self, name, path):
# Transform evdev name to libinput name
props = None
for libinput_name in self.xinput("--list", "--name-only").split("\n"):
props_temp = self.xinput('--list-props', libinput_name)
if props_temp.find(path) != -1:
props = props_temp
break
if not props:
return False
self.device_name = libinput_name
self.canvas.itemconfig(self.device_text, text=name)
props = self.xinput('--list-props', self.device_name)
ctm_start = props.find('Coordinate Transformation Matrix')
ctm_end = props.find("\n", ctm_start)
if ctm_start < 0 or ctm_end < 0:
return False
		ctm_start += 40 # Skip past the 'Coordinate Transformation Matrix (nnn):' label and tab
node_start = props.find('Device Node')
node_start = props.find('"', node_start)
node_end = props.find('"', node_start + 1)
if node_start < 0 or node_end < 0:
return False
# Store CTM to allow restore if we cancel calibration
self.ctm = []
for value in props[ctm_start:ctm_end].split(", "):
self.ctm.append(float(value))
self.node = props[node_start:node_end] # Get node name to allow mapping between evdev and xinput names
self.setCalibration(self.device_name, [1,0,0,0,1,0,0,0,1]) # Reset calibration to allow absolute acquisition
return True
# Handle touch press event
# event: Event including x,y coordinates (optional)
def onPress(self, event=None):
if self.device_name and not self.pressed:
self.canvas.itemconfig("crosshairs_lines", fill="red")
self.canvas.itemconfig("crosshairs_circles", outline="red")
self.pressed = True
# Handle touch release event
# event: Event including x,y coordinates
def onRelease(self, event):
self.canvas.itemconfig("crosshairs_lines", fill="white")
self.canvas.itemconfig("crosshairs_circles", outline="white")
if not self.pressed:
return
self.pressed = False
self.countdown = self.timeout
if not self.device_name:
return
if self.index < 2:
# More points to acquire
self.touch_points[self.index].x = event.x
self.touch_points[self.index].y = event.y
self.index += 1
if self.index > 1:
# Debounce
if abs(self.touch_points[0].x - self.touch_points[1].x) < self.debounce and abs(self.touch_points[0].y - self.touch_points[1].y) < self.debounce:
self.index = 0
else:
min_x = self.touch_points[0].x
max_x = self.touch_points[1].x
min_y = self.touch_points[0].y
max_y = self.touch_points[1].y
if min_x == max_x or min_y == max_y:
self.index = 0
self.drawCross()
return
# Acquisition complete - calculate calibration data
					a = self.width * 0.7 / (max_x - min_x)
					# The offset is measured from touch point 0 (the 15% crosshair),
					# so the same expression holds for either axis orientation.
					c = (self.width * 0.15 - a * min_x) / self.width
					e = self.height * 0.7 / (max_y - min_y)
					f = (self.height * 0.15 - e * min_y) / self.height
self.setCalibration(self.device_name, [a, 0, c, 0, e, f, 0, 0, 1], True)
#TODO: Allow user to check calibration
self.zyngui.zynswitch_defered('S',1)
return
self.drawCross()
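	# Editor's note: the matrix built above is the X11 Coordinate
	# Transformation Matrix in row-major order; it maps normalised touch
	# coordinates (x, y, 1) to normalised screen coordinates via
	#
	#   x' = a*x + c
	#   y' = e*y + f
	#
	# A self-contained sketch of that mapping (helper name is illustrative):
	#
	#   def apply_ctm(matrix, x, y):
	#       a, _, c, _, e, f, _, _, _ = matrix
	#       return (a * x + c, e * y + f)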
# Draws the crosshairs for touch registration for current index (0=NW,1=SE,2=CENTRE)
def drawCross(self):
if self.index > 2:
return
self.canvas.coords(self.crosshair_vertical,
self.display_points[self.index].x, self.display_points[self.index].y - self.crosshair_size,
self.display_points[self.index].x, self.display_points[self.index].y + self.crosshair_size)
self.canvas.coords(self.crosshair_horizontal,
self.display_points[self.index].x - self.crosshair_size, self.display_points[self.index].y,
self.display_points[self.index].x + self.crosshair_size, self.display_points[self.index].y)
self.canvas.coords(self.crosshair_circle,
self.display_points[self.index].x - self.crosshair_size * 0.8, self.display_points[self.index].y - self.crosshair_size * 0.8,
self.display_points[self.index].x + self.crosshair_size * 0.8, self.display_points[self.index].y + self.crosshair_size * 0.8)
self.canvas.coords(self.crosshair_inner_circle,
self.display_points[self.index].x - self.crosshair_size * 0.2, self.display_points[self.index].y - self.crosshair_size * 0.2,
self.display_points[self.index].x + self.crosshair_size * 0.2, self.display_points[self.index].y + self.crosshair_size * 0.2)
# Apply screen calibration
# device: libinput name or ID of device to calibrate
# matrix: Transform matrix as 9 element array (3x3)
# write_file: True to write configuration to file (default: false)
def setCalibration(self, device, matrix, write_file=False):
try:
logging.debug("Calibration touchscreen '%s' with matrix [%f %f %f %f %f %f %f %f %f]",
device,
matrix[0],
matrix[1],
matrix[2],
matrix[3],
matrix[4],
matrix[5],
matrix[6],
matrix[7],
matrix[8])
self.xinput("--set-prop", device, "Coordinate Transformation Matrix",
str(matrix[0]), str(matrix[1]), str(matrix[2]), str(matrix[3]), str(matrix[4]), str(matrix[5]), str(matrix[6]), str(matrix[7]), str(matrix[8]))
if write_file:
				# Update existing config in file
"""
try:
f = open("/etc/X11/xorg.conf.d/99-calibration.conf", "r")
config = f.read()
section_start = config.find('Section "InputClass"')
while section_start >= 0:
section_end = config.find('EndSection', section_start)
if section_end > section_start and config.find('MatchProduct "%s'%(device), section_start, section_end) > section_start:
tm_start = config.find('Option "TransformationMatrix"', section_start, section_end)
tm_end = config.find('\n', tm_start, section_end)
if tm_start > section_start and tm_end > tm_start:
f = open("/etc/X11/xorg.conf.d/99-calibration.conf", "w")
f.write(config[:tm_start + 29])
f.write(' "%f %f %f %f %f %f %f %f %f"' % (matrix[0], matrix[1], matrix[2], matrix[3], matrix[4], matrix[5], matrix[6], matrix[7], matrix[8]))
f.write(' # updated %s'%(datetime.now()))
f.write(config[tm_end:])
f.close()
return
section_start = config.find('Section "InputClass"', section_end)
except:
pass # File probably does not yet exist
"""
# If we got here then we need to append this device to config
				# For the record it is with deep reservation that I code this duplicate writing of files - I was only following orders!
try:
os.mkdir(os.environ.get("ZYNTHIAN_CONFIG_DIR") + "/touchscreen/")
except:
pass # directory already exists
with open(os.environ.get("ZYNTHIAN_CONFIG_DIR") + "/touchscreen/" + os.environ.get("DISPLAY_NAME"), "w") as f:
f.write('Section "InputClass" # Created %s\n'%(datetime.now()))
f.write(' Identifier "calibration"\n')
f.write(' MatchProduct "%s"\n'%(device))
f.write(' Option "TransformationMatrix" "%f %f %f %f %f %f %f %f %f"\n' % (matrix[0], matrix[1], matrix[2], matrix[3], matrix[4], matrix[5], matrix[6], matrix[7], matrix[8]))
f.write('EndSection\n')
with open("/etc/X11/xorg.conf.d/99-calibration.conf", "w") as f:
f.write('Section "InputClass" # Created %s\n'%(datetime.now()))
f.write(' Identifier "calibration"\n')
f.write(' MatchProduct "%s"\n'%(device))
f.write(' Option "TransformationMatrix" "%f %f %f %f %f %f %f %f %f"\n' % (matrix[0], matrix[1], matrix[2], matrix[3], matrix[4], matrix[5], matrix[6], matrix[7], matrix[8]))
f.write('EndSection\n')
except Exception as e:
logging.warning("Failed to set touchscreen calibration", e)
# Hide display
# reset: True to reset calibration (default: True)
def hide(self, reset=True):
if self.shown:
self.timer.cancel()
if reset and self.device_name and self.ctm:
self.setCalibration(self.device_name, self.ctm)
self.main_frame.grid_forget()
self.shown=False
# Show display
def show(self):
if not self.shown:
self.shown=True
self.device_name = None
self.ctm = None
self.canvas.unbind('<Button-1>')
self.canvas.unbind('<ButtonRelease-1>')
self.canvas.itemconfig(self.countdown_text, text="Closing in %ds" % (self.timeout))
self.canvas.itemconfig(self.device_text, text="")
self.countdown = self.timeout
self.index = 2
self.drawCross()
self.main_frame.grid()
self.onTimer()
self.detect_thread = Thread(target=self.detectDevice, args=(), daemon=True)
self.detect_thread.start()
# Handle one second timer trigger
def onTimer(self):
if self.shown:
self.canvas.itemconfig(self.countdown_text, text="Closing in %ds" % (self.countdown))
if self.countdown <= 0:
self.zyngui.zynswitch_defered('S',1)
return
if not self.pressed:
self.countdown -= 1
self.timer = Timer(interval=1, function=self.onTimer)
self.timer.start()
# Handle zyncoder read - called by parent when zyncoders updated
def zyncoder_read(self):
pass
# Handle refresh loading - called by parent during screen load
def refresh_loading(self):
pass
# Handle physical switch press
# type: Switch duration type (default: short)
def switch_select(self, type='S'):
pass
#-------------------------------------------------------------------------------
|
zynthian/zynthian-ui
|
zyngui/zynthian_gui_touchscreen_calibration.py
|
Python
|
gpl-3.0
| 16,339
|
[
"Brian"
] |
38893a018b6c7618bff81f630371fc1bab9916e53003fc120d757dd54f166e7d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
This class provides read and write functions of VASP POSCAR file.
"""
import numpy as np
import copy
class POSCAR(object):
"""
POSCAR class enables POSCAR read/write functions.
"""
def __init__(self,fname="POSCAR"):
self.fname = fname
self.h = np.zeros((3,3),dtype=float)
self.num_atoms= []
self.pos= []
self.flags= []
self.species= []
# def to_dict(self):
# """
# Returns a dictionary-type variable of the object.
# """
# dict= {}
# dict.update({'comment':self.c1})
# dict.update({'afac':self.afac})
# dict.update({})
def read(self,fname = 'POSCAR'):
self.fname = fname
f= open(fname,'r')
#.....1st line: comment
self.c1= f.readline()
#.....2nd line: multiplying factor
self.afac= float(f.readline().split()[0])
#.....3rd-5th lines: lattice vectors
# NOTE (180409): The definition of the hmat here is different from hmat in NAPSystem.
# The hmat in NAPSystem is transpose of this.
data= f.readline().split()
self.h[0]= [ float(x) for x in data ]
data= f.readline().split()
self.h[1]= [ float(x) for x in data ]
data= f.readline().split()
self.h[2]= [ float(x) for x in data ]
#.....6th line: num of atoms
data= f.readline().split()
if not data[0].isdigit(): # if it is not digit, read next line
self.species = copy.copy(data)
data = f.readline().split()
self.num_atoms= np.array([ int(n) for n in data ])
#.....7th line: comment
self.c7= f.readline()
if self.c7[0] in ('s','S'):
self.c8= f.readline()
#.....following lines: atom positions
for ni in self.num_atoms:
for j in range(ni):
data= f.readline().split()
self.pos.append(np.array([ float(x) for x in data[0:3] ]))
if len(data) > 3:
if len(data) == 6:
self.flags.append([ x for x in data[3:6] ])
elif len(data) == 4:
self.flags.append([ data[3],data[3],data[3] ])
else:
self.flags.append([ 'T', 'T', 'T' ])
f.close()
def write(self,fname='POSCAR'):
f= open(fname,'w')
f.write(self.c1)
f.write(' {0:12.7f}\n'.format(self.afac))
f.write(' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(self.h[0,0],
self.h[0,1],
self.h[0,2]))
f.write(' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(self.h[1,0],
self.h[1,1],
self.h[1,2]))
f.write(' {0:12.7f} {1:12.7f} {2:12.7f}\n'.format(self.h[2,0],
self.h[2,1],
self.h[2,2]))
for n in self.num_atoms:
f.write(' {0:3d}'.format(n))
f.write('\n')
f.write(self.c7)
if hasattr(self, 'c8'):
f.write(self.c8)
for i in range(len(self.pos)):
f.write(' {0:12.7f} {1:12.7f} {2:12.7f}'.format(self.pos[i][0],
self.pos[i][1],
self.pos[i][2]))
f.write(' {0} {1} {2}\n'.format(self.flags[i][0],
self.flags[i][1],
self.flags[i][2]))
f.close()
if __name__ == '__main__':
poscar= POSCAR()
poscar.read()
poscar.write(fname='TEST-POSCAR')
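# Editor's note: an illustrative POSCAR fragment in the layout read() expects
# (values invented for the example):
#
#   cubic Si             <- 1st line: comment
#    1.0                 <- 2nd line: multiplying factor
#    5.43 0.00 0.00      <- 3rd-5th lines: lattice vectors
#    0.00 5.43 0.00
#    0.00 0.00 5.43
#    Si                  <- optional species line (first token not a digit)
#    2                   <- number of atoms per species
#   Selective dynamics   <- 7th line starts with 's'/'S', so one more is read
#   Direct
#    0.00 0.00 0.00 T T T
#    0.25 0.25 0.25 T T T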
|
ryokbys/nap
|
nappy/vasp/poscar.py
|
Python
|
mit
| 3,936
|
[
"VASP"
] |
5b91441cbad8676ca2a4954e7c18243c7ee7adbf8ef3700d525f5085e65a7f7f
|
"""Soundex algorithm
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim (mark@diveintopython.org)"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/11 19:11:21 $"
__copyright__ = "Copyright (c) 2004 Mark Pilgrim"
__license__ = "Python"
import string
allChar = string.uppercase + string.lowercase
charToSoundex = string.maketrans(allChar, "91239129922455912623919292" * 2)
def soundex(source):
"convert string to Soundex equivalent"
# Soundex requirements:
# source string must be at least 1 character
# and must consist entirely of letters
if (not source) or (not source.isalpha()):
return "0000"
# Soundex algorithm:
# 1. make first character uppercase
# 2. translate all other characters to Soundex digits
digits = source[0].upper() + source[1:].translate(charToSoundex)
# 3. remove consecutive duplicates
digits2 = digits[0]
for d in digits[1:]:
if digits2[-1] != d:
digits2 += d
# 4. remove all "9"s
digits3 = ''
for d in digits2:
if d != '9':
digits3 += d
# 5. pad end with "0"s to 4 characters
while len(digits3) < 4:
digits3 += "0"
# 6. return first 4 characters
return digits3[:4]
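# Editor's note -- the steps above, worked for 'Pilgrim':
#   step 2: 'P' + translate('ilgrim')  -> 'P942695'
#   step 3: no adjacent duplicates     -> 'P942695'
#   step 4: drop the 9s                -> 'P4265'
#   steps 5-6: pad/trim to 4 chars     -> 'P426'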
if __name__ == '__main__':
from timeit import Timer
names = ('Woo', 'Pilgrim', 'Flingjingwaller')
for name in names:
statement = "soundex('%s')" % name
t = Timer(statement, "from __main__ import soundex")
print name.ljust(15), soundex(name), min(t.repeat())
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/refs/diveintopython-pdf-5.4/diveintopython-5.4/py/soundex/stage4/soundex4a.py
|
Python
|
mit
| 1,688
|
[
"VisIt"
] |
8504f8e73d7efed5b5f9edb8ab6be4b1147ae1d280fa6b7ebf6a542863e4ff12
|
# Author : Arda ICMEZ
# Date : 04.10.15
import numpy as np
import matplotlib.pyplot as plt
def populate(arr):
"""
Rounding up numbers from the given gaussian array, adding them into
the dictionary with the calculated index
arr : list<double>
"""
roundedArr = {}
for item in arr:
keyItem = int(round(item))
if roundedArr.has_key(keyItem):
roundedArr[keyItem] += 1
else:
roundedArr[keyItem] = 1
return roundedArr
def normalize(arr,sumNumbers):
"""
Normalizing values of the array
arr : dictionary<int,double>
sumNumbers : int
"""
for item in arr:
arr[item] = round(arr[item]/sumNumbers,4)
return arr
def getDistance(arr1,arr2):
"""
    Calculating the distance between two normalized distributions using the Wasserstein metric
arr1, arr2 : dictionary<int,double>
"""
mySum = 0.0
distIndex = sorted(arr1.keys())[0] - sorted(arr2.keys())[0]
if distIndex>0:
tempArr = arr1
arr1=arr2
arr2=tempArr
distIndex = abs(distIndex)
tempArr = arr2.copy()
for item in sorted(arr1.keys()):
while arr1[item] > 0.0:
myIndex = item+distIndex
if arr2.has_key(myIndex):
if(arr1[item]> arr2[item+distIndex]):
mySum += arr2[item+distIndex]*abs(distIndex)
arr1[item] -= arr2[item+distIndex]
del tempArr[item+distIndex]
distIndex+=1
elif arr1[item] == arr2[item+distIndex]:
mySum+= arr1[item] *abs(distIndex)
arr1[item] = 0
del tempArr[item+distIndex]
else:
mySum += arr1[item]*abs(distIndex)
arr2[item+distIndex] -= arr1[item]
arr1[item]=0
distIndex-=1
else:
if not tempArr:
print "HATA PAYI GELDI : ", arr1[item]
break
else:
distIndex = sorted(tempArr.keys())[0] - item
return mySum
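# Editor's note -- a micro-example of getDistance: for the one-point
# histograms {0: 1.0} and {3: 1.0} all of the mass moves 3 bins, so
# getDistance({0: 1.0}, {3: 1.0}) returns 3.0.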
N = 10000
firstTuple = (round(np.random.uniform(-5,5),2),round(np.random.uniform(0.5,1.5),2),N) # (Mu,Sigma,N)
secondTuple = (round(np.random.uniform(-5,5),2),round(np.random.uniform(0.5,1.5),2),N) # (Mu,Sigma,N)
gaussArr1 = np.random.normal(*firstTuple)
gaussArr2 = np.random.normal(*secondTuple)
roundedArr1 = populate(gaussArr1)
roundedArr2 = populate(gaussArr2)
sumNumbers = float(N) # Each sample is counted exactly once, so sum(roundedArr1|2) == N
roundedArr1 = normalize(roundedArr1,sumNumbers)
roundedArr2 = normalize(roundedArr2,sumNumbers)
mySum = getDistance(roundedArr1.copy(),roundedArr2.copy())
print "My distance is : " , mySum
plt.figure(1)
plt.title("My Histogram")
plt.axis((20,-20,0,1))
p1 = plt.bar(roundedArr1.keys(),roundedArr1.values(),1.0, color='g')
p2 = plt.bar(roundedArr2.keys(),roundedArr2.values(),1.0, color='r')
plt.show()
|
ArdaIcmez/dagitik
|
odev01/odev01.py
|
Python
|
gpl-2.0
| 3,013
|
[
"Gaussian"
] |
dc45ed3de60bfd3c43056379f12b96224e428c4713b1391af8b6a6de37f0627f
|
# -*- coding: utf-8 -*-
"""IPython Shell classes.
All the matplotlib support code was co-developed with John Hunter,
matplotlib's author.
"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Code begins
# Stdlib imports
import __builtin__
import __main__
import Queue
import inspect
import os
import sys
import thread
import threading
import time
from signal import signal, SIGINT
try:
import ctypes
HAS_CTYPES = True
except ImportError:
HAS_CTYPES = False
# IPython imports
import IPython
from IPython import ultraTB, ipapi
from IPython.Magic import Magic
from IPython.genutils import Term,warn,error,flag_calls, ask_yes_no
from IPython.iplib import InteractiveShell
from IPython.ipmaker import make_IPython
from IPython.ipstruct import Struct
from IPython.testing import decorators as testdec
# Globals
# global flag to pass around information about Ctrl-C without exceptions
KBINT = False
# global flag to turn on/off Tk support.
USE_TK = False
# ID for the main thread, used for cross-thread exceptions
MAIN_THREAD_ID = thread.get_ident()
# Tag when runcode() is active, for exception handling
CODE_RUN = None
# Default timeout for waiting for multithreaded shells (in seconds)
GUI_TIMEOUT = 10
#-----------------------------------------------------------------------------
# This class is trivial now, but I want to have it in to publish a clean
# interface. Later when the internals are reorganized, code that uses this
# shouldn't have to change.
class IPShell:
"""Create an IPython instance."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
debug=1,shell_class=InteractiveShell):
self.IP = make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,shell_class=shell_class)
def mainloop(self,sys_exit=0,banner=None):
self.IP.mainloop(banner)
if sys_exit:
sys.exit()
#-----------------------------------------------------------------------------
def kill_embedded(self,parameter_s=''):
"""%kill_embedded : deactivate for good the current embedded IPython.
This function (after asking for confirmation) sets an internal flag so that
an embedded IPython will never activate again. This is useful to
permanently disable a shell that is being called inside a loop: once you've
figured out what you needed from it, you may then kill it and the program
will then continue to run without the interactive shell interfering again.
"""
kill = ask_yes_no("Are you sure you want to kill this embedded instance "
"(y/n)? [y/N] ",'n')
if kill:
self.shell.embedded_active = False
print "This embedded IPython will not reactivate anymore once you exit."
class IPShellEmbed:
"""Allow embedding an IPython shell into a running program.
Instances of this class are callable, with the __call__ method being an
alias to the embed() method of an InteractiveShell instance.
Usage (see also the example-embed.py file for a running example):
ipshell = IPShellEmbed([argv,banner,exit_msg,rc_override])
- argv: list containing valid command-line options for IPython, as they
would appear in sys.argv[1:].
For example, the following command-line options:
$ ipython -prompt_in1 'Input <\\#>' -colors LightBG
would be passed in the argv list as:
['-prompt_in1','Input <\\#>','-colors','LightBG']
- banner: string which gets printed every time the interpreter starts.
- exit_msg: string which gets printed every time the interpreter exits.
- rc_override: a dict or Struct of configuration options such as those
used by IPython. These options are read from your ~/.ipython/ipythonrc
file when the Shell object is created. Passing an explicit rc_override
dict with any options you want allows you to override those values at
creation time without having to modify the file. This way you can create
embeddable instances configured in any way you want without editing any
global files (thus keeping your interactive IPython configuration
unchanged).
Then the ipshell instance can be called anywhere inside your code:
ipshell(header='') -> Opens up an IPython shell.
- header: string printed by the IPython shell upon startup. This can let
you know where in your code you are when dropping into the shell. Note
that 'banner' gets prepended to all calls, so header is used for
location-specific information.
For more details, see the __call__ method below.
When the IPython shell is exited with Ctrl-D, normal program execution
resumes.
This functionality was inspired by a posting on comp.lang.python by cmkl
<cmkleffner@gmx.de> on Dec. 06/01 concerning similar uses of pyrepl, and
by the IDL stop/continue commands."""
def __init__(self,argv=None,banner='',exit_msg=None,rc_override=None,
user_ns=None):
"""Note that argv here is a string, NOT a list."""
self.set_banner(banner)
self.set_exit_msg(exit_msg)
self.set_dummy_mode(0)
# sys.displayhook is a global, we need to save the user's original
# Don't rely on __displayhook__, as the user may have changed that.
self.sys_displayhook_ori = sys.displayhook
# save readline completer status
try:
#print 'Save completer',sys.ipcompleter # dbg
self.sys_ipcompleter_ori = sys.ipcompleter
except:
pass # not nested with IPython
self.IP = make_IPython(argv,rc_override=rc_override,
embedded=True,
user_ns=user_ns)
ip = ipapi.IPApi(self.IP)
ip.expose_magic("kill_embedded",kill_embedded)
# copy our own displayhook also
self.sys_displayhook_embed = sys.displayhook
# and leave the system's display hook clean
sys.displayhook = self.sys_displayhook_ori
# don't use the ipython crash handler so that user exceptions aren't
# trapped
sys.excepthook = ultraTB.FormattedTB(color_scheme = self.IP.rc.colors,
mode = self.IP.rc.xmode,
call_pdb = self.IP.rc.pdb)
self.restore_system_completer()
def restore_system_completer(self):
"""Restores the readline completer which was in place.
This allows embedded IPython within IPython not to disrupt the
parent's completion.
"""
try:
self.IP.readline.set_completer(self.sys_ipcompleter_ori)
sys.ipcompleter = self.sys_ipcompleter_ori
except:
pass
def __call__(self,header='',local_ns=None,global_ns=None,dummy=None):
"""Activate the interactive interpreter.
__call__(self,header='',local_ns=None,global_ns,dummy=None) -> Start
the interpreter shell with the given local and global namespaces, and
optionally print a header string at startup.
The shell can be globally activated/deactivated using the
set/get_dummy_mode methods. This allows you to turn off a shell used
for debugging globally.
However, *each* time you call the shell you can override the current
state of dummy_mode with the optional keyword parameter 'dummy'. For
example, if you set dummy mode on with IPShell.set_dummy_mode(1), you
can still have a specific call work by making it as IPShell(dummy=0).
The optional keyword parameter dummy controls whether the call
actually does anything. """
# If the user has turned it off, go away
if not self.IP.embedded_active:
return
# Normal exits from interactive mode set this flag, so the shell can't
# re-enter (it checks this variable at the start of interactive mode).
self.IP.exit_now = False
# Allow the dummy parameter to override the global __dummy_mode
if dummy or (dummy != 0 and self.__dummy_mode):
return
# Set global subsystems (display,completions) to our values
sys.displayhook = self.sys_displayhook_embed
if self.IP.has_readline:
self.IP.set_completer()
if self.banner and header:
format = '%s\n%s\n'
else:
format = '%s%s\n'
banner = format % (self.banner,header)
# Call the embedding code with a stack depth of 1 so it can skip over
# our call and get the original caller's namespaces.
self.IP.embed_mainloop(banner,local_ns,global_ns,stack_depth=1)
if self.exit_msg:
print self.exit_msg
# Restore global systems (display, completion)
sys.displayhook = self.sys_displayhook_ori
self.restore_system_completer()
def set_dummy_mode(self,dummy):
"""Sets the embeddable shell's dummy mode parameter.
set_dummy_mode(dummy): dummy = 0 or 1.
This parameter is persistent and makes calls to the embeddable shell
silently return without performing any action. This allows you to
globally activate or deactivate a shell you're using with a single call.
        If you need to override it for a single call, use the 'dummy' keyword
        argument of the __call__ method instead."""
if dummy not in [0,1,False,True]:
raise ValueError,'dummy parameter must be boolean'
self.__dummy_mode = dummy
def get_dummy_mode(self):
"""Return the current value of the dummy mode parameter.
"""
return self.__dummy_mode
def set_banner(self,banner):
"""Sets the global banner.
This banner gets prepended to every header printed when the shell
instance is called."""
self.banner = banner
def set_exit_msg(self,exit_msg):
"""Sets the global exit_msg.
This exit message gets printed upon exiting every time the embedded
shell is called. It is None by default. """
self.exit_msg = exit_msg
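# Editor's note: a minimal embedding sketch following the docstring above
# (the surrounding function, banner and messages are the user's own):
#
#   from IPython.Shell import IPShellEmbed
#   ipshell = IPShellEmbed(banner='Entering IPython', exit_msg='Leaving IPython.')
#
#   def some_function():
#       x = 42
#       ipshell(header='in some_function(); x is visible here')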
#-----------------------------------------------------------------------------
if HAS_CTYPES:
# Add async exception support. Trick taken from:
# http://sebulba.wikispaces.com/recipe+thread2
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
if not inspect.isclass(exctype):
raise TypeError("Only types can be raised (not instances)")
# Explicit cast to c_long is necessary for 64-bit support:
# See https://bugs.launchpad.net/ipython/+bug/237073
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# If it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
raise SystemError("PyThreadState_SetAsyncExc failed")
def sigint_handler(signum,stack_frame):
"""Sigint handler for threaded apps.
This is a horrible hack to pass information about SIGINT _without_
using exceptions, since I haven't been able to properly manage
cross-thread exceptions in GTK/WX. In fact, I don't think it can be
done (or at least that's my understanding from a c.l.py thread where
this was discussed)."""
global KBINT
if CODE_RUN:
_async_raise(MAIN_THREAD_ID,KeyboardInterrupt)
else:
KBINT = True
print '\nKeyboardInterrupt - Press <Enter> to continue.',
Term.cout.flush()
else:
def sigint_handler(signum,stack_frame):
"""Sigint handler for threaded apps.
This is a horrible hack to pass information about SIGINT _without_
using exceptions, since I haven't been able to properly manage
cross-thread exceptions in GTK/WX. In fact, I don't think it can be
done (or at least that's my understanding from a c.l.py thread where
this was discussed)."""
global KBINT
print '\nKeyboardInterrupt - Press <Enter> to continue.',
Term.cout.flush()
# Set global flag so that runsource can know that Ctrl-C was hit
KBINT = True
class MTInteractiveShell(InteractiveShell):
"""Simple multi-threaded shell."""
# Threading strategy taken from:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65109, by Brian
# McErlean and John Finlay. Modified with corrections by Antoon Pardon,
# from the pygtk mailing list, to avoid lockups with system calls.
# class attribute to indicate whether the class supports threads or not.
# Subclasses with thread support should override this as needed.
isthreaded = True
def __init__(self,name,usage=None,rc=Struct(opts=None,args=None),
user_ns=None,user_global_ns=None,banner2='',
gui_timeout=GUI_TIMEOUT,**kw):
"""Similar to the normal InteractiveShell, but with threading control"""
InteractiveShell.__init__(self,name,usage,rc,user_ns,
user_global_ns,banner2)
# Timeout we wait for GUI thread
self.gui_timeout = gui_timeout
# A queue to hold the code to be executed.
self.code_queue = Queue.Queue()
# Stuff to do at closing time
self._kill = None
on_kill = kw.get('on_kill', [])
# Check that all things to kill are callable:
for t in on_kill:
if not callable(t):
raise TypeError,'on_kill must be a list of callables'
self.on_kill = on_kill
# thread identity of the "worker thread" (that may execute code directly)
self.worker_ident = None
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Modified version of code.py's runsource(), to handle threading issues.
See the original for full docstring details."""
global KBINT
# If Ctrl-C was typed, we reset the flag and return right away
if KBINT:
KBINT = False
return False
if self._kill:
# can't queue new code if we are being killed
return True
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# shortcut - if we are in worker thread, or the worker thread is not
# running, execute directly (to allow recursion and prevent deadlock if
# code is run early in IPython construction)
if (self.worker_ident is None
or self.worker_ident == thread.get_ident() ):
InteractiveShell.runcode(self,code)
return False
# Case 3
# Store code in queue, so the execution thread can handle it.
completed_ev, received_ev = threading.Event(), threading.Event()
self.code_queue.put((code,completed_ev, received_ev))
# first make sure the message was received, with timeout
received_ev.wait(self.gui_timeout)
if not received_ev.isSet():
# the mainloop is dead, start executing code directly
print "Warning: Timeout for mainloop thread exceeded"
print "switching to nonthreaded mode (until mainloop wakes up again)"
self.worker_ident = None
else:
completed_ev.wait()
return False
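    # Editor's note: the two-event handshake used by runsource() above, in
    # miniature (illustrative):
    #
    #   completed_ev, received_ev = threading.Event(), threading.Event()
    #   self.code_queue.put((code, completed_ev, received_ev))
    #   received_ev.wait(self.gui_timeout)   # did the worker pick it up?
    #   if received_ev.isSet():
    #       completed_ev.wait()              # block until the code has run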
def runcode(self):
"""Execute a code object.
Multithreaded wrapper around IPython's runcode()."""
global CODE_RUN
# we are in worker thread, stash out the id for runsource()
self.worker_ident = thread.get_ident()
if self._kill:
print >>Term.cout, 'Closing threads...',
Term.cout.flush()
for tokill in self.on_kill:
tokill()
print >>Term.cout, 'Done.'
# allow kill() to return
self._kill.set()
return True
# Install sigint handler. We do it every time to ensure that if user
# code modifies it, we restore our own handling.
try:
signal(SIGINT,sigint_handler)
except SystemError:
# This happens under Windows, which seems to have all sorts
# of problems with signal handling. Oh well...
pass
        # Flush queue of pending code by calling the run method of the parent
# class with all items which may be in the queue.
code_to_run = None
while 1:
try:
code_to_run, completed_ev, received_ev = self.code_queue.get_nowait()
except Queue.Empty:
break
received_ev.set()
# Exceptions need to be raised differently depending on which
# thread is active. This convoluted try/except is only there to
# protect against asynchronous exceptions, to ensure that a KBINT
# at the wrong time doesn't deadlock everything. The global
            # CODE_RUN is set to true/false as close as possible to the
# runcode() call, so that the KBINT handler is correctly informed.
try:
try:
CODE_RUN = True
InteractiveShell.runcode(self,code_to_run)
except KeyboardInterrupt:
print "Keyboard interrupted in mainloop"
while not self.code_queue.empty():
code, ev1,ev2 = self.code_queue.get_nowait()
ev1.set()
ev2.set()
break
finally:
CODE_RUN = False
# allow runsource() return from wait
completed_ev.set()
# This MUST return true for gtk threading to work
return True
def kill(self):
"""Kill the thread, returning when it has been shut down."""
self._kill = threading.Event()
self._kill.wait()
class MatplotlibShellBase:
"""Mixin class to provide the necessary modifications to regular IPython
shell classes for matplotlib support.
Given Python's MRO, this should be used as the FIRST class in the
inheritance hierarchy, so that it overrides the relevant methods."""
def _matplotlib_config(self,name,user_ns,user_global_ns=None):
"""Return items needed to setup the user's shell with matplotlib"""
# Initialize matplotlib to interactive mode always
import matplotlib
from matplotlib import backends
matplotlib.interactive(True)
def use(arg):
"""IPython wrapper for matplotlib's backend switcher.
In interactive use, we can not allow switching to a different
interactive backend, since thread conflicts will most likely crash
the python interpreter. This routine does a safety check first,
and refuses to perform a dangerous switch. It still allows
switching to non-interactive backends."""
if arg in backends.interactive_bk and arg != self.mpl_backend:
m=('invalid matplotlib backend switch.\n'
'This script attempted to switch to the interactive '
'backend: `%s`\n'
'Your current choice of interactive backend is: `%s`\n\n'
'Switching interactive matplotlib backends at runtime\n'
'would crash the python interpreter, '
'and IPython has blocked it.\n\n'
'You need to either change your choice of matplotlib backend\n'
'by editing your .matplotlibrc file, or run this script as a \n'
'standalone file from the command line, not using IPython.\n' %
(arg,self.mpl_backend) )
raise RuntimeError, m
else:
self.mpl_use(arg)
self.mpl_use._called = True
self.matplotlib = matplotlib
self.mpl_backend = matplotlib.rcParams['backend']
# we also need to block switching of interactive backends by use()
self.mpl_use = matplotlib.use
self.mpl_use._called = False
# overwrite the original matplotlib.use with our wrapper
matplotlib.use = use
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pylab as pylab
self.pylab = pylab
self.pylab.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
self.pylab.draw_if_interactive = flag_calls(self.pylab.draw_if_interactive)
# Build a user namespace initialized with matplotlib/matlab features.
user_ns, user_global_ns = IPython.ipapi.make_user_namespaces(user_ns,
user_global_ns)
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
exec ("import numpy\n"
"import numpy as np\n"
"import matplotlib\n"
"import matplotlib.pylab as pylab\n"
"try:\n"
" import matplotlib.pyplot as plt\n"
"except ImportError:\n"
" pass\n"
) in user_ns
# Build matplotlib info banner
b="""
Welcome to pylab, a matplotlib-based Python environment.
For more information, type 'help(pylab)'.
"""
return user_ns,user_global_ns,b
def mplot_exec(self,fname,*where,**kw):
"""Execute a matplotlib script.
This is a call to execfile(), but wrapped in safeties to properly
handle interactive rendering and backend switching."""
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
isInteractive = self.matplotlib.rcParams['interactive']
self.matplotlib.interactive(False)
self.safe_execfile(fname,*where,**kw)
self.matplotlib.interactive(isInteractive)
# make rendering call now, if the user tried to do it
if self.pylab.draw_if_interactive.called:
self.pylab.draw()
self.pylab.draw_if_interactive.called = False
# if a backend switch was performed, reverse it now
if self.mpl_use._called:
self.matplotlib.rcParams['backend'] = self.mpl_backend
@testdec.skip_doctest
def magic_run(self,parameter_s=''):
Magic.magic_run(self,parameter_s,runner=self.mplot_exec)
# Fix the docstring so users see the original as well
magic_run.__doc__ = "%s\n%s" % (Magic.magic_run.__doc__,
"\n *** Modified %run for Matplotlib,"
" with proper interactive handling ***")
# Now we provide 2 versions of a matplotlib-aware IPython base shells, single
# and multithreaded. Note that these are meant for internal use, the IPShell*
# classes below are the ones meant for public consumption.
class MatplotlibShell(MatplotlibShellBase,InteractiveShell):
"""Single-threaded shell with matplotlib support."""
def __init__(self,name,usage=None,rc=Struct(opts=None,args=None),
user_ns=None,user_global_ns=None,**kw):
user_ns,user_global_ns,b2 = self._matplotlib_config(name,user_ns,user_global_ns)
InteractiveShell.__init__(self,name,usage,rc,user_ns,user_global_ns,
banner2=b2,**kw)
class MatplotlibMTShell(MatplotlibShellBase,MTInteractiveShell):
"""Multi-threaded shell with matplotlib support."""
def __init__(self,name,usage=None,rc=Struct(opts=None,args=None),
user_ns=None,user_global_ns=None, **kw):
user_ns,user_global_ns,b2 = self._matplotlib_config(name,user_ns,user_global_ns)
MTInteractiveShell.__init__(self,name,usage,rc,user_ns,user_global_ns,
banner2=b2,**kw)
#-----------------------------------------------------------------------------
# Utility functions for the different GUI enabled IPShell* classes.
def get_tk():
"""Tries to import Tkinter and returns a withdrawn Tkinter root
window. If Tkinter is already imported or not available, this
returns None. This function calls `hijack_tk` underneath.
"""
if not USE_TK or sys.modules.has_key('Tkinter'):
return None
else:
try:
import Tkinter
except ImportError:
return None
else:
hijack_tk()
r = Tkinter.Tk()
r.withdraw()
return r
def hijack_tk():
"""Modifies Tkinter's mainloop with a dummy so when a module calls
mainloop, it does not block.
"""
def misc_mainloop(self, n=0):
pass
def tkinter_mainloop(n=0):
pass
import Tkinter
Tkinter.Misc.mainloop = misc_mainloop
Tkinter.mainloop = tkinter_mainloop
def update_tk(tk):
"""Updates the Tkinter event loop. This is typically called from
the respective WX or GTK mainloops.
"""
if tk:
tk.update()
def hijack_wx():
"""Modifies wxPython's MainLoop with a dummy so user code does not
block IPython. The hijacked mainloop function is returned.
"""
def dummy_mainloop(*args, **kw):
pass
try:
import wx
except ImportError:
# For very old versions of WX
import wxPython as wx
ver = wx.__version__
orig_mainloop = None
if ver[:3] >= '2.5':
import wx
if hasattr(wx, '_core_'): core = getattr(wx, '_core_')
elif hasattr(wx, '_core'): core = getattr(wx, '_core')
else: raise AttributeError('Could not find wx core module')
orig_mainloop = core.PyApp_MainLoop
core.PyApp_MainLoop = dummy_mainloop
elif ver[:3] == '2.4':
orig_mainloop = wx.wxc.wxPyApp_MainLoop
wx.wxc.wxPyApp_MainLoop = dummy_mainloop
else:
warn("Unable to find either wxPython version 2.4 or >= 2.5.")
return orig_mainloop
def hijack_gtk():
"""Modifies pyGTK's mainloop with a dummy so user code does not
block IPython. This function returns the original `gtk.mainloop`
function that has been hijacked.
"""
def dummy_mainloop(*args, **kw):
pass
import gtk
if gtk.pygtk_version >= (2,4,0): orig_mainloop = gtk.main
else: orig_mainloop = gtk.mainloop
gtk.mainloop = dummy_mainloop
gtk.main = dummy_mainloop
return orig_mainloop
def hijack_qt():
"""Modifies PyQt's mainloop with a dummy so user code does not
block IPython. This function returns the original
`qt.qApp.exec_loop` function that has been hijacked.
"""
def dummy_mainloop(*args, **kw):
pass
import qt
orig_mainloop = qt.qApp.exec_loop
qt.qApp.exec_loop = dummy_mainloop
qt.QApplication.exec_loop = dummy_mainloop
return orig_mainloop
def hijack_qt4():
"""Modifies PyQt4's mainloop with a dummy so user code does not
block IPython. This function returns the original
`QtGui.qApp.exec_` function that has been hijacked.
"""
def dummy_mainloop(*args, **kw):
pass
from PyQt4 import QtGui, QtCore
orig_mainloop = QtGui.qApp.exec_
QtGui.qApp.exec_ = dummy_mainloop
QtGui.QApplication.exec_ = dummy_mainloop
QtCore.QCoreApplication.exec_ = dummy_mainloop
return orig_mainloop
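# Editor's note: the four hijack_* helpers above share one recipe -- replace
# the toolkit's blocking mainloop with a no-op and return the original so
# IPython's own thread can drive it later, e.g. (illustrative):
#
#   orig = hijack_gtk()   # gtk.main() is now a harmless no-op
#   ...                   # user code may call gtk.main() without blocking
#   orig()                # IPython invokes the real loop when ready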
#-----------------------------------------------------------------------------
# The IPShell* classes below are the ones meant to be run by external code as
# IPython instances. Note that unless a specific threading strategy is
# desired, the factory function start() below should be used instead (it
# selects the proper threaded class).
class IPThread(threading.Thread):
def run(self):
self.IP.mainloop(self._banner)
self.IP.kill()
class IPShellGTK(IPThread):
"""Run a gtk mainloop() in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
GTK timeout callback."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
debug=1,shell_class=MTInteractiveShell):
import gtk
## # Check for set_interactive, coming up in new pygtk.
## # Disable it so that this code works, but notify
## # the user that he has a better option as well.
## # XXX TODO better support when set_interactive is released
## try:
## gtk.set_interactive(False)
## print "Your PyGtk has set_interactive(), so you can use the"
## print "more stable single-threaded Gtk mode."
## print "See https://bugs.launchpad.net/ipython/+bug/270856"
## except AttributeError:
## pass
self.gtk = gtk
self.gtk_mainloop = hijack_gtk()
# Allows us to use both Tk and GTK.
self.tk = get_tk()
if gtk.pygtk_version >= (2,4,0): mainquit = self.gtk.main_quit
else: mainquit = self.gtk.mainquit
self.IP = make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[mainquit])
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
threading.Thread.__init__(self)
def mainloop(self,sys_exit=0,banner=None):
self._banner = banner
if self.gtk.pygtk_version >= (2,4,0):
import gobject
gobject.idle_add(self.on_timer)
else:
self.gtk.idle_add(self.on_timer)
if sys.platform != 'win32':
try:
if self.gtk.gtk_version[0] >= 2:
self.gtk.gdk.threads_init()
except AttributeError:
pass
except RuntimeError:
error('Your pyGTK likely has not been compiled with '
'threading support.\n'
'The exception printout is below.\n'
'You can either rebuild pyGTK with threads, or '
'try using \n'
'matplotlib with a different backend (like Tk or WX).\n'
'Note that matplotlib will most likely not work in its '
'current state!')
self.IP.InteractiveTB()
self.start()
self.gtk.gdk.threads_enter()
self.gtk_mainloop()
self.gtk.gdk.threads_leave()
self.join()
def on_timer(self):
"""Called when GTK is idle.
Must return True always, otherwise GTK stops calling it"""
update_tk(self.tk)
self.IP.runcode()
time.sleep(0.01)
return True
class IPShellWX(IPThread):
"""Run a wx mainloop() in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
    wx timer callback."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
debug=1,shell_class=MTInteractiveShell):
self.IP = make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[self.wxexit])
wantedwxversion=self.IP.rc.wxversion
if wantedwxversion!="0":
try:
import wxversion
except ImportError:
error('The wxversion module is needed for WX version selection')
else:
try:
wxversion.select(wantedwxversion)
except:
self.IP.InteractiveTB()
error('Requested wxPython version %s could not be loaded' %
wantedwxversion)
import wx
threading.Thread.__init__(self)
self.wx = wx
self.wx_mainloop = hijack_wx()
# Allows us to use both Tk and GTK.
self.tk = get_tk()
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
self.app = None
def wxexit(self, *args):
if self.app is not None:
self.app.agent.timer.Stop()
self.app.ExitMainLoop()
def mainloop(self,sys_exit=0,banner=None):
self._banner = banner
self.start()
class TimerAgent(self.wx.MiniFrame):
wx = self.wx
IP = self.IP
tk = self.tk
def __init__(self, parent, interval):
style = self.wx.DEFAULT_FRAME_STYLE | self.wx.TINY_CAPTION_HORIZ
self.wx.MiniFrame.__init__(self, parent, -1, ' ', pos=(200, 200),
size=(100, 100),style=style)
self.Show(False)
self.interval = interval
self.timerId = self.wx.NewId()
def StartWork(self):
self.timer = self.wx.Timer(self, self.timerId)
self.wx.EVT_TIMER(self, self.timerId, self.OnTimer)
self.timer.Start(self.interval)
def OnTimer(self, event):
update_tk(self.tk)
self.IP.runcode()
class App(self.wx.App):
wx = self.wx
TIMEOUT = self.TIMEOUT
def OnInit(self):
'Create the main window and insert the custom frame'
self.agent = TimerAgent(None, self.TIMEOUT)
self.agent.Show(False)
self.agent.StartWork()
return True
self.app = App(redirect=False)
self.wx_mainloop(self.app)
self.join()
class IPShellQt(IPThread):
"""Run a Qt event loop in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
Qt timer / slot."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self, argv=None, user_ns=None, user_global_ns=None,
debug=0, shell_class=MTInteractiveShell):
import qt
self.exec_loop = hijack_qt()
# Allows us to use both Tk and QT.
self.tk = get_tk()
self.IP = make_IPython(argv,
user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[qt.qApp.exit])
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
threading.Thread.__init__(self)
def mainloop(self, sys_exit=0, banner=None):
import qt
self._banner = banner
if qt.QApplication.startingUp():
a = qt.QApplication(sys.argv)
self.timer = qt.QTimer()
qt.QObject.connect(self.timer,
qt.SIGNAL('timeout()'),
self.on_timer)
self.start()
self.timer.start(self.TIMEOUT, True)
while True:
if self.IP._kill: break
self.exec_loop()
self.join()
def on_timer(self):
update_tk(self.tk)
result = self.IP.runcode()
self.timer.start(self.TIMEOUT, True)
return result
class IPShellQt4(IPThread):
"""Run a Qt event loop in a separate thread.
Python commands can be passed to the thread where they will be executed.
This is implemented by periodically checking for passed code using a
Qt timer / slot."""
TIMEOUT = 100 # Millisecond interval between timeouts.
def __init__(self, argv=None, user_ns=None, user_global_ns=None,
debug=0, shell_class=MTInteractiveShell):
from PyQt4 import QtCore, QtGui
try:
# present in PyQt4-4.2.1 or later
QtCore.pyqtRemoveInputHook()
except AttributeError:
pass
if QtCore.PYQT_VERSION_STR == '4.3':
warn('''PyQt4 version 4.3 detected.
If you experience repeated threading warnings, please update PyQt4.
''')
self.exec_ = hijack_qt4()
# Allows us to use both Tk and QT.
self.tk = get_tk()
self.IP = make_IPython(argv,
user_ns=user_ns,
user_global_ns=user_global_ns,
debug=debug,
shell_class=shell_class,
on_kill=[QtGui.qApp.exit])
# HACK: slot for banner in self; it will be passed to the mainloop
# method only and .run() needs it. The actual value will be set by
# .mainloop().
self._banner = None
threading.Thread.__init__(self)
def mainloop(self, sys_exit=0, banner=None):
from PyQt4 import QtCore, QtGui
self._banner = banner
if QtGui.QApplication.startingUp():
a = QtGui.QApplication(sys.argv)
self.timer = QtCore.QTimer()
QtCore.QObject.connect(self.timer,
QtCore.SIGNAL('timeout()'),
self.on_timer)
self.start()
self.timer.start(self.TIMEOUT)
while True:
if self.IP._kill: break
self.exec_()
self.join()
def on_timer(self):
update_tk(self.tk)
result = self.IP.runcode()
self.timer.start(self.TIMEOUT)
return result
# A set of matplotlib public IPython shell classes, for single-threaded (Tk*
# and FLTK*) and multithreaded (GTK*, WX* and Qt*) backends to use.
def _load_pylab(user_ns):
"""Allow users to disable pulling all of pylab into the top-level
namespace.
This little utility must be called AFTER the actual ipython instance is
running, since only then will the options file have been fully parsed."""
ip = IPython.ipapi.get()
if ip.options.pylab_import_all:
ip.ex("from matplotlib.pylab import *")
ip.IP.user_config_ns.update(ip.user_ns)
class IPShellMatplotlib(IPShell):
"""Subclass IPShell with MatplotlibShell as the internal shell.
Single-threaded class, meant for the Tk* and FLTK* backends.
Having this on a separate class simplifies the external driver code."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShell.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibGTK(IPShellGTK):
"""Subclass IPShellGTK with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the GTK* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellGTK.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibWX(IPShellWX):
"""Subclass IPShellWX with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the WX* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellWX.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibQt(IPShellQt):
"""Subclass IPShellQt with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the Qt* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellQt.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
class IPShellMatplotlibQt4(IPShellQt4):
"""Subclass IPShellQt4 with MatplotlibMTShell as the internal shell.
Multi-threaded class, meant for the Qt4* backends."""
def __init__(self,argv=None,user_ns=None,user_global_ns=None,debug=1):
IPShellQt4.__init__(self,argv,user_ns,user_global_ns,debug,
shell_class=MatplotlibMTShell)
_load_pylab(self.IP.user_ns)
#-----------------------------------------------------------------------------
# Factory functions to actually start the proper thread-aware shell
def check_gtk(mode):
try:
import gtk
except ImportError:
return mode
if hasattr(gtk,'set_interactive'):
gtk.set_interactive(False)
return 'tkthread'
else:
return mode
def _select_shell(argv):
"""Select a shell from the given argv vector.
This function implements the threading selection policy, allowing runtime
control of the threading mode, both for general users and for matplotlib.
Return:
Shell class to be instantiated for runtime operation.
"""
global USE_TK
mpl_shell = {'gthread' : IPShellMatplotlibGTK,
'wthread' : IPShellMatplotlibWX,
'qthread' : IPShellMatplotlibQt,
'q4thread' : IPShellMatplotlibQt4,
'tkthread' : IPShellMatplotlib, # Tk is built-in
}
th_shell = {'gthread' : IPShellGTK,
'wthread' : IPShellWX,
'qthread' : IPShellQt,
'q4thread' : IPShellQt4,
'tkthread' : IPShell, # Tk is built-in
}
backends = {'gthread' : 'GTKAgg',
'wthread' : 'WXAgg',
'qthread' : 'QtAgg',
'q4thread' :'Qt4Agg',
'tkthread' :'TkAgg',
}
all_opts = set(['tk','pylab','gthread','qthread','q4thread','wthread',
'tkthread'])
user_opts = set([s.replace('-','') for s in argv[:3]])
special_opts = user_opts & all_opts
if 'tk' in special_opts:
USE_TK = True
special_opts.remove('tk')
if 'pylab' in special_opts:
try:
import matplotlib
except ImportError:
error('matplotlib could NOT be imported! Starting normal IPython.')
return IPShell
special_opts.remove('pylab')
# If there's any option left, it means the user wants to force the
# threading backend, else it's auto-selected from the rc file
if special_opts:
th_mode = special_opts.pop()
matplotlib.rcParams['backend'] = backends[th_mode]
else:
backend = matplotlib.rcParams['backend']
if backend.startswith('GTK'):
th_mode = 'gthread'
elif backend.startswith('WX'):
th_mode = 'wthread'
elif backend.startswith('Qt4'):
th_mode = 'q4thread'
elif backend.startswith('Qt'):
th_mode = 'qthread'
else:
# Any other backend, use plain Tk
th_mode = 'tkthread'
# New versions of pygtk don't need the brittle threaded support.
th_mode = check_gtk(th_mode)
return mpl_shell[th_mode]
else:
# No pylab requested, just plain threads
try:
th_mode = special_opts.pop()
except KeyError:
th_mode = 'tkthread'
# New versions of pygtk don't need the brittle threaded support.
th_mode = check_gtk(th_mode)
return th_shell[th_mode]
# This is the one which should be called by external code.
def start(user_ns = None):
"""Return a running shell instance, dealing with threading options.
This is a factory function which will instantiate the proper IPython shell
based on the user's threading choice. Such a selector is needed because
different GUI toolkits require different thread handling details."""
shell = _select_shell(sys.argv)
return shell(user_ns = user_ns)
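# A minimal sketch for external callers (the option strings are the ones
# _select_shell recognizes; they must appear among the first few arguments):
#
#     import sys, IPython.Shell
#     sys.argv[1:1] = ['-pylab', '-q4thread']
#     sh = IPython.Shell.start()
#     sh.mainloop()
#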
# Some aliases for backwards compatibility
IPythonShell = IPShell
IPythonShellEmbed = IPShellEmbed
#************************ End of file <Shell.py> ***************************
|
toomoresuch/pysonengine
|
eggs/ipython-0.10.1-py2.6.egg/IPython/Shell.py
|
Python
|
mit
| 46,259
|
[
"Brian"
] |
7ab733639f07b9dd9ee15b0003a9cce73ce01c276f95d43ecfbc13b31a4867fe
|
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import io
import os
import re
import tempfile
import time
from contextlib import contextmanager
import warnings
from selenium.webdriver.common.alert import Alert
from selenium.common.exceptions import (
ElementClickInterceptedException,
NoSuchElementException,
WebDriverException,
StaleElementReferenceException,
TimeoutException,
MoveTargetOutOfBoundsException,
)
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC # NOQA: N812
from selenium.webdriver.support.ui import WebDriverWait
from splinter.driver import DriverAPI, ElementAPI
from splinter.driver.find_links import FindLinks
from splinter.driver.xpath_utils import _concat_xpath_from_str
from splinter.element_list import ElementList
from splinter.driver.webdriver.cookie_manager import CookieManager
from splinter.exceptions import ElementDoesNotExist
# Patch contextmanager onto Selenium's Alert
def alert_enter(self):
return self
def alert_exit(self, type, value, traceback): # NOQA: A002
pass
Alert.__enter__ = alert_enter
Alert.__exit__ = alert_exit
Alert.fill_with = Alert.send_keys
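# With the patches above, Selenium's Alert works as a context manager and can
# be filled like a splinter element. A minimal sketch (assumes a prompt()
# dialog is currently open):
#
#     with browser.get_alert() as alert:
#         alert.fill_with('some text')  # alias for send_keys
#         alert.accept()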
class switch_window: # NOQA: N801
def __init__(self, browser, window_handle):
self.browser = browser
self.window_handle = window_handle
def __enter__(self):
self.current_window_handle = self.browser.driver.current_window_handle
self.browser.driver.switch_to.window(self.window_handle)
def __exit__(self, type, value, traceback): # NOQA: A002
if self.current_window_handle in self.browser.driver.window_handles:
self.browser.driver.switch_to.window(self.current_window_handle)
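# A minimal switch_window sketch: run code against another window handle, then
# restore the originally active window on exit (the handle chosen here is
# illustrative):
#
#     with switch_window(browser, browser.driver.window_handles[-1]):
#         print(browser.title)  # title of the other window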
class Window:
""" A class representing a browser window """
def __init__(self, browser, name):
self._browser = browser
self.name = name
@property
def title(self):
""" The title of this window """
with switch_window(self._browser, self.name):
return self._browser.title
@property
def url(self):
""" The url of this window """
with switch_window(self._browser, self.name):
return self._browser.url
@property
def index(self):
""" The index of this window in browser.windows """
return self._browser.driver.window_handles.index(self.name)
@property
def prev(self):
""" Return the previous window """
prev_index = self.index - 1
prev_handle = self._browser.driver.window_handles[prev_index]
return Window(self._browser, prev_handle)
@property # NOQA: A003
def next(self): # NOQA: A003
""" Return the next window """
next_index = (self.index + 1) % len(self._browser.driver.window_handles)
next_handle = self._browser.driver.window_handles[next_index]
return Window(self._browser, next_handle)
def is_current():
doc = "Whether this window is currently the browser's active window."
def fget(self):
return self._browser.driver.current_window_handle == self.name
def fset(self, value):
if value is True:
self._browser.driver.switch_to.window(self.name)
else:
raise TypeError("can only set to True")
return locals()
is_current = property(**is_current())
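    # is_current is built with the property(**locals()) idiom above, so it
    # reads as a boolean and activates the window when assigned True:
    #
    #     if not window.is_current:
    #         window.is_current = True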
def new_tab(self, url):
""" Open new tab in current window """
if self._browser.driver.name == 'firefox':
self._browser.driver.get('about:config')
self._browser.driver.execute_script('document.getElementById("warningButton").click();')
self._browser.driver.execute_script(
"""
Components.classes['@mozilla.org/preferences-service;1']
.getService(Components.interfaces.nsIPrefBranch)
.setIntPref('browser.link.open_newwindow', 3);
""")
self._browser.driver.execute_script("window.open('%s', '_blank');" % url)
if self._browser.driver.name == 'firefox':
self._browser.driver.execute_script(
"""
Components.classes['@mozilla.org/preferences-service;1']
.getService(Components.interfaces.nsIPrefBranch)
.setIntPref('browser.link.open_newwindow', 2);
""")
self._browser.driver.back()
def close(self):
""" Close this window. If this window is active, switch to previous window """
target = self.prev if (self.is_current and self.prev != self) else None
with switch_window(self._browser, self.name):
self._browser.driver.close()
if target is not None:
target.is_current = True
def close_others(self):
self.is_current = True
for window in self._browser.windows:
if window != self:
window.close()
def __eq__(self, other):
return self._browser == other._browser and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<Window %s: %s>" % (self.name, self.url)
class Windows:
""" A class representing all open browser windows """
def __init__(self, browser):
self._browser = browser
def __len__(self):
return len(self._browser.driver.window_handles)
def __getitem__(self, key):
window_handles = self._browser.driver.window_handles
try:
return Window(self._browser, window_handles[key])
except TypeError:
if key not in window_handles:
raise KeyError(key)
return Window(self._browser, key)
def current():
doc = "The currently active window"
def fget(self):
current_handle = self._browser.driver.current_window_handle
return Window(self._browser, current_handle) if current_handle else None
def fset(self, value):
self._browser.driver.switch_to.window(value.name)
return locals()
current = property(**current())
def __repr__(self):
return str(
[
Window(self._browser, handle)
for handle in self._browser.driver.window_handles
]
)
def _find(self, finder, finder_kwargs=None):
"""Search for elements. Returns a list of results.
Arguments:
finder: The function to use for the element search.
finder_kwargs: Keyword Arguments for the finder function.
Returns:
list
"""
finder_kwargs = finder_kwargs or {}
elements = None
elem_list = []
try:
elements = finder(**finder_kwargs)
if not isinstance(elements, list):
elements = [elements]
except (
NoSuchElementException,
StaleElementReferenceException,
):
# This exception is sometimes thrown if the page changes
# quickly
pass
if elements:
elem_list = [self.element_class(element, self) for element in elements]
return elem_list
def find_by(
self,
finder,
finder_kwargs=None,
original_find: str = None,
original_query: str = None,
wait_time: int = None,
):
"""Wrapper for finding elements.
Must be attached to a class.
Returns:
ElementList
"""
elem_list = []
func_name = getattr(getattr(finder, "__func__"), "__name__")
find_by = original_find or func_name[func_name.rfind("_by_") + 4:]
query = original_query or finder_kwargs.get('value')
# Zero second wait time means only check once
if wait_time == 0:
elem_list = _find(self, finder, finder_kwargs)
else:
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
elem_list = _find(self, finder, finder_kwargs)
if elem_list:
break
return ElementList(elem_list, find_by=find_by, query=query)
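# Retry semantics of find_by, sketched: wait_time=0 performs a single check,
# while any other value polls _find until the deadline or the first match:
#
#     browser.find_by_css('.result')               # polls up to browser.wait_time
#     browser.find_by_css('.result', wait_time=0)  # one immediate check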
class BaseWebDriver(DriverAPI):
driver = None
find_by = find_by
def __init__(self, driver=None, wait_time=2):
self.wait_time = wait_time
self.links = FindLinks(self)
self.driver = driver
self.element_class = WebDriverElement
self._cookie_manager = CookieManager(self.driver)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.quit()
@property
def title(self):
return self.driver.title
@property
def html(self):
return self.driver.page_source
@property
def url(self):
return self.driver.current_url
@property
def status_code(self):
raise NotImplementedError
def visit(self, url):
self.driver.get(url)
def back(self):
self.driver.back()
def forward(self):
self.driver.forward()
def reload(self):
self.driver.refresh()
def execute_script(self, script, *args):
return self.driver.execute_script(script, *args)
def evaluate_script(self, script, *args):
return self.driver.execute_script("return %s" % script, *args)
def is_element_visible(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if finder(selector, wait_time=wait_time) and finder(selector, wait_time=wait_time).visible:
return True
return False
def is_element_not_visible(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
element = finder(selector, wait_time=0)
if not element or (element and not element.visible):
return True
return False
def is_element_visible_by_css(self, css_selector, wait_time=None):
return self.is_element_visible(self.find_by_css, css_selector, wait_time)
def is_element_not_visible_by_css(self, css_selector, wait_time=None):
return self.is_element_not_visible(self.find_by_css, css_selector, wait_time)
def is_element_visible_by_xpath(self, xpath, wait_time=None):
return self.is_element_visible(self.find_by_xpath, xpath, wait_time)
def is_element_not_visible_by_xpath(self, xpath, wait_time=None):
return self.is_element_not_visible(self.find_by_xpath, xpath, wait_time)
def is_element_present(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if finder(selector, wait_time=wait_time):
return True
return False
def is_element_not_present(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if not finder(selector, wait_time=0):
return True
return False
def is_element_present_by_css(self, css_selector, wait_time=None):
return self.is_element_present(self.find_by_css, css_selector, wait_time)
def is_element_not_present_by_css(self, css_selector, wait_time=None):
return self.is_element_not_present(self.find_by_css, css_selector, wait_time)
def is_element_present_by_xpath(self, xpath, wait_time=None):
return self.is_element_present(self.find_by_xpath, xpath, wait_time)
def is_element_not_present_by_xpath(self, xpath, wait_time=None):
return self.is_element_not_present(self.find_by_xpath, xpath, wait_time)
def is_element_present_by_tag(self, tag, wait_time=None):
return self.is_element_present(self.find_by_tag, tag, wait_time)
def is_element_not_present_by_tag(self, tag, wait_time=None):
return self.is_element_not_present(self.find_by_tag, tag, wait_time)
def is_element_present_by_name(self, name, wait_time=None):
return self.is_element_present(self.find_by_name, name, wait_time)
def is_element_not_present_by_name(self, name, wait_time=None):
return self.is_element_not_present(self.find_by_name, name, wait_time)
def is_element_present_by_value(self, value, wait_time=None):
return self.is_element_present(self.find_by_value, value, wait_time)
def is_element_not_present_by_value(self, value, wait_time=None):
return self.is_element_not_present(self.find_by_value, value, wait_time)
def is_element_present_by_text(self, text, wait_time=None):
return self.is_element_present(self.find_by_text, text, wait_time)
def is_element_not_present_by_text(self, text, wait_time=None):
return self.is_element_not_present(self.find_by_text, text, wait_time)
def is_element_present_by_id(self, id, wait_time=None): # NOQA: A002
return self.is_element_present(self.find_by_id, id, wait_time)
def is_element_not_present_by_id(self, id, wait_time=None): # NOQA: A002
return self.is_element_not_present(self.find_by_id, id, wait_time)
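    # The is_element_* predicates above poll until wait_time elapses and
    # return booleans rather than elements. A minimal sketch (selectors are
    # illustrative):
    #
    #     browser.is_element_present_by_css('#done', wait_time=10)
    #     browser.is_element_not_present_by_id('spinner')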
def get_alert(self, wait_time=None):
wait_time = wait_time or self.wait_time
try:
alert = WebDriverWait(self.driver, wait_time).until(EC.alert_is_present())
return alert
except TimeoutException:
return None
def _is_text_present(self, text):
try:
self.find_by_tag("body").text.index(text)
return True
except (NoSuchElementException, StaleElementReferenceException, ValueError):
# NoSuchElementException will be thrown if the body tag isn't present
# Can occur if the page isn't fully loaded yet.
# StaleElementReferenceException will be thrown if the page changes quickly
pass
return False
def is_text_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if self._is_text_present(text):
return True
return False
def is_text_not_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if not self._is_text_present(text):
return True
return False
@contextmanager
def get_iframe(self, frame_reference):
# If a WebDriverElement is provided, send the underlying element
if isinstance(frame_reference, WebDriverElement):
frame_reference = frame_reference._element
self.driver.switch_to.frame(frame_reference)
try:
yield self
finally:
self.driver.switch_to.frame(None)
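    # A minimal get_iframe sketch (the frame name and field are illustrative);
    # the finally clause above guarantees we switch back out of the frame:
    #
    #     with browser.get_iframe('payment-frame') as frame:
    #         frame.find_by_name('card').fill('4111111111111111')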
def find_option_by_value(self, value):
return self.find_by_xpath(
'//option[@value="%s"]' % value,
original_find="option by value",
original_query=value,
)
def find_option_by_text(self, text):
return self.find_by_xpath(
'//option[normalize-space(text())="%s"]' % text,
original_find="option by text",
original_query=text,
)
def find_link_by_href(self, href):
warnings.warn(
'browser.find_link_by_href is deprecated.'
' Use browser.links.find_by_href instead.',
FutureWarning,
)
return self.links.find_by_href(href)
def find_link_by_partial_href(self, partial_href):
warnings.warn(
'browser.find_link_by_partial_href is deprecated.'
' Use browser.links.find_by_partial_href instead.',
FutureWarning,
)
return self.links.find_by_partial_href(partial_href)
def find_link_by_partial_text(self, partial_text):
warnings.warn(
'browser.find_link_by_partial_text is deprecated.'
' Use browser.links.find_by_partial_text instead.',
FutureWarning,
)
return self.links.find_by_partial_text(partial_text)
def find_link_by_text(self, text):
warnings.warn(
'browser.find_link_by_text is deprecated.'
' Use browser.links.find_by_text instead.',
FutureWarning,
)
return self.links.find_by_text(text)
def find_by_css(self, css_selector, wait_time=None):
return self.find_by(
self.driver.find_elements,
finder_kwargs={'by': By.CSS_SELECTOR, 'value': css_selector},
original_find="css",
original_query=css_selector,
wait_time=wait_time,
)
def find_by_xpath(self, xpath, original_find=None, original_query=None, wait_time=None):
original_find = original_find or "xpath"
original_query = original_query or xpath
return self.find_by(
self.driver.find_elements,
finder_kwargs={'by': By.XPATH, 'value': xpath},
original_find=original_find,
original_query=original_query,
wait_time=wait_time,
)
def find_by_name(self, name, wait_time=None):
return self.find_by(
self.driver.find_elements,
finder_kwargs={'by': By.NAME, 'value': name},
original_find='name',
wait_time=wait_time,
)
def find_by_tag(self, tag, wait_time=None):
return self.find_by(
self.driver.find_elements,
finder_kwargs={'by': By.TAG_NAME, 'value': tag},
original_find='tag_name',
wait_time=wait_time,
)
def find_by_value(self, value, wait_time=None):
elem = self.find_by_xpath(
'//*[@value="{}"]'.format(value),
original_find="value",
original_query=value,
wait_time=wait_time,
)
if elem:
return elem
return self.find_by_xpath('//*[.="%s"]' % value)
def find_by_text(self, text=None, wait_time=None):
xpath_str = _concat_xpath_from_str(text)
return self.find_by_xpath(
xpath_str,
original_find="text",
original_query=text,
wait_time=wait_time,
)
def find_by_id(self, id, wait_time=None): # NOQA: A002
return self.find_by(
self.driver.find_element,
finder_kwargs={'by': By.ID, 'value': id},
original_find='id',
wait_time=wait_time,
)
def fill(self, name, value):
field = self.find_by_name(name).first
field.value = value
attach_file = fill
def fill_form(self, field_values, form_id=None, name=None, ignore_missing=False):
form = None
if name is not None:
form = self.find_by_name(name)
if form_id is not None:
form = self.find_by_id(form_id)
for name, value in field_values.items():
try:
if form:
elements = form.find_by_name(name)
else:
elements = self.find_by_name(name)
element = elements.first
if (
element["type"] in ["text", "password", "tel"]
or element.tag_name == "textarea"
):
element.value = value
elif element["type"] == "checkbox":
if value:
element.check()
else:
element.uncheck()
elif element["type"] == "radio":
for field in elements:
if field.value == value:
field.click()
elif element._element.tag_name == "select":
element.select(value)
else:
element.value = value
except ElementDoesNotExist as e:
if not ignore_missing:
raise ElementDoesNotExist(e)
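    # A minimal fill_form sketch (field names and values are illustrative):
    # text inputs get values, checkboxes are (un)checked, and radios and
    # selects are matched by value:
    #
    #     browser.fill_form({'email': 'a@b.c', 'subscribe': True,
    #                        'plan': 'pro'}, form_id='signup')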
def type(self, name, value, slowly=False): # NOQA: A003
element = self.find_by_name(name).first._element
if slowly:
return TypeIterator(element, value)
element.send_keys(value)
return value
def choose(self, name, value):
fields = self.find_by_name(name)
for field in fields:
if field.value == value:
field.click()
def check(self, name):
self.find_by_name(name).first.check()
def uncheck(self, name):
self.find_by_name(name).first.uncheck()
def screenshot(self, name="", suffix=".png", full=False, unique_file=True):
filename = '{}{}'.format(name, suffix)
if unique_file:
(fd, filename) = tempfile.mkstemp(prefix=name, suffix=suffix)
# Don't hold the file
os.close(fd)
if full:
ori_window_size = self.driver.get_window_size()
self.full_screen()
self.driver.get_screenshot_as_file(filename)
if full:
self.recover_screen(ori_window_size)
return filename
def html_snapshot(self, name="", suffix=".html", encoding='utf-8', unique_file=True):
filename = '{}{}'.format(name, suffix)
if unique_file:
(fd, filename) = tempfile.mkstemp(prefix=name, suffix=suffix)
# Don't hold the file
os.close(fd)
with io.open(filename, 'w', encoding=encoding) as f:
f.write(self.html)
return filename
def select(self, name, value):
self.find_by_xpath(
'//select[@name="%s"]//option[@value="%s"]' % (name, value)
).first._element.click()
def select_by_text(self, name, text):
self.find_by_xpath(
'//select[@name="%s"]/option[text()="%s"]' % (name, text)
).first._element.click()
def quit(self): # NOQA: A003
try:
self.driver.quit()
except WebDriverException:
pass
def full_screen(self):
width = self.driver.execute_script("return Math.max(document.body.scrollWidth, document.body.offsetWidth);")
height = self.driver.execute_script("return Math.max(document.body.scrollHeight, document.body.offsetHeight);")
self.driver.set_window_size(width, height)
def recover_screen(self, size):
width = size.get('width')
height = size.get('height')
self.driver.set_window_size(width, height)
@property
def cookies(self):
return self._cookie_manager
@property
def windows(self):
return Windows(self)
class TypeIterator(object):
def __init__(self, element, keys):
self._element = element
self._keys = keys
def __iter__(self):
for key in self._keys:
self._element.send_keys(key)
yield key
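# TypeIterator is what type(..., slowly=True) returns: iterating it sends one
# key at a time, letting callers act between keystrokes. A minimal sketch:
#
#     for key in browser.type('query', 'hello', slowly=True):
#         pass  # e.g. assert an autocomplete suggestion appeared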
class WebDriverElement(ElementAPI):
find_by = find_by
def __init__(self, element, parent):
self._element = element
self.parent = parent
self.driver = self.parent.driver
self.wait_time = self.parent.wait_time
self.element_class = self.parent.element_class
self.links = FindLinks(self)
def _get_value(self):
return self["value"] or self._element.text
def _set_value(self, value):
if self._element.get_attribute("type") != "file":
self._element.clear()
self._element.send_keys(value)
value = property(_get_value, _set_value)
@property
def text(self):
return self._element.text
@property
def tag_name(self):
return self._element.tag_name
def clear(self):
if self._element.get_attribute("type") in [
"email",
"number",
"password",
"search",
"tel",
"text",
"textarea",
"url",
]:
self._element.clear()
def fill(self, value):
self.value = value
def select(self, value=None, text=None):
finder = None
search_value = None
if text:
finder = 'text()'
search_value = text
elif value:
finder = '@value'
search_value = value
self.find_by_xpath(
'.//option[{}="{}"]'.format(finder, search_value)
)._element.click()
def select_by_text(self, text):
self.select(text=text)
def type(self, value, slowly=False): # NOQA: A003
if slowly:
return TypeIterator(self._element, value)
self._element.send_keys(value)
return value
def click(self):
"""Click an element.
If the element is not interactive due to being covered by another
element, the click will retry for self.parent.wait_time amount of
time.
"""
end_time = time.time() + self.parent.wait_time
error = None
while time.time() < end_time:
try:
return self._element.click()
except(
ElementClickInterceptedException,
WebDriverException,
) as e:
error = e
raise error
def check(self):
if not self.checked:
self.click()
def uncheck(self):
if self.checked:
self.click()
@property
def checked(self):
return self._element.is_selected()
selected = checked
@property
def visible(self):
return self._element.is_displayed()
@property
def html(self):
return self["innerHTML"]
@property
def outer_html(self):
return self["outerHTML"]
def find_by_css(self, selector, wait_time=None):
return self.find_by(
self._element.find_elements,
finder_kwargs={'by': By.CSS_SELECTOR, 'value': selector},
original_find="css",
wait_time=wait_time,
)
def find_by_xpath(self, selector, wait_time=None, original_find="xpath", original_query=None):
return self.find_by(
self._element.find_elements,
finder_kwargs={'by': By.XPATH, 'value': selector},
original_find=original_find,
original_query=original_query,
wait_time=wait_time,
)
def find_by_name(self, selector, wait_time=None):
return self.find_by(
self._element.find_elements,
finder_kwargs={'by': By.NAME, 'value': selector},
original_find="name",
wait_time=wait_time,
)
def find_by_tag(self, selector, wait_time=None):
return self.find_by(
self._element.find_elements,
finder_kwargs={'by': By.TAG_NAME, 'value': selector},
original_find="tag",
wait_time=wait_time,
)
def find_by_value(self, value, wait_time=None):
selector = '[value="{}"]'.format(value)
return self.find_by(
self._element.find_elements,
finder_kwargs={'by': By.CSS_SELECTOR, 'value': selector},
original_find="value",
original_query=value,
wait_time=wait_time,
)
def find_by_text(self, text, wait_time=None):
# Add a period to the xpath to search only inside the parent.
xpath_str = '.{}'.format(_concat_xpath_from_str(text))
return self.find_by(
self._element.find_elements,
finder_kwargs={'by': By.XPATH, 'value': xpath_str},
original_find="text",
original_query=text,
wait_time=wait_time,
)
def find_by_id(self, selector, wait_time=None):
return self.find_by(
self._element.find_elements,
finder_kwargs={'by': By.ID, 'value': selector},
original_find="id",
wait_time=wait_time,
)
def has_class(self, class_name):
return bool(
re.search(r"(?:^|\s)" + re.escape(class_name) + r"(?:$|\s)", self["class"])
)
def scroll_to(self):
"""
Scroll to the current element.
"""
self.driver.execute_script("arguments[0].scrollIntoView(true);", self._element)
def mouse_over(self):
"""
        Perform a mouse-over on the element.
"""
self.scroll_to()
ActionChains(self.driver).move_to_element(self._element).perform()
def mouse_out(self):
"""
        Move the mouse pointer off the element (a mouse-out).
"""
self.scroll_to()
size = self._element.size
try:
# Fails on left edge of viewport
ActionChains(self.driver).move_to_element_with_offset(
self._element, -10, -10).click().perform()
except MoveTargetOutOfBoundsException:
ActionChains(self.driver).move_to_element_with_offset(
self._element, size['width'] + 10, 10).click().perform()
def double_click(self):
"""
        Perform a double-click on the element.
"""
self.scroll_to()
ActionChains(self.driver).double_click(self._element).perform()
def right_click(self):
"""
        Perform a right-click (context click) on the element.
"""
self.scroll_to()
ActionChains(self.driver).context_click(self._element).perform()
def drag_and_drop(self, droppable):
"""
        Drag this element and drop it onto another element.
"""
self.scroll_to()
ActionChains(self.driver).drag_and_drop(self._element, droppable._element).perform()
def _full_screen(self):
width = self.driver.execute_script("return Math.max(document.body.scrollWidth, document.body.offsetWidth);")
height = self.driver.execute_script("return Math.max(document.body.scrollHeight, document.body.offsetHeight);")
self.driver.set_window_size(width, height)
def screenshot(self, name='', suffix='.png', full=False, unique_file=True):
filename = '{}{}'.format(name, suffix)
if unique_file:
(fd, filename) = tempfile.mkstemp(prefix=name, suffix=suffix)
# don't hold the file
os.close(fd)
if full:
ori_window_size = self.driver.get_window_size()
self._full_screen()
target = self.screenshot_as_png()
if full:
# Restore screen size
width = ori_window_size.get('width')
height = ori_window_size.get('height')
self.driver.set_window_size(width, height)
target.save(filename)
return filename
def screenshot_as_png(self):
try:
from PIL import Image
except ImportError:
            raise NotImplementedError('Element screenshots require the Pillow dependency. '
                                      'Please run "pip install Pillow" to install it.')
full_screen_png = self.driver.get_screenshot_as_png()
full_screen_bytes = io.BytesIO(full_screen_png)
im = Image.open(full_screen_bytes)
im_width, im_height = im.size[0], im.size[1]
window_size = self.driver.get_window_size()
window_width = window_size['width']
ratio = im_width * 1.0 / window_width
height_ratio = im_height / ratio
im = im.resize((int(window_width), int(height_ratio)))
location = self._element.location
x, y = location['x'], location['y']
pic_size = self._element.size
w, h = pic_size['width'], pic_size['height']
box = x, y, x + w, y + h
box = [int(i) for i in box]
target = im.crop(box)
return target
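    # Worked example of the scaling above (illustrative numbers): on a 2x
    # HiDPI display a 1200px-wide window yields a 2400px-wide PNG, so
    # ratio = 2400 / 1200 = 2.0; resizing by 1/ratio returns the image to
    # CSS-pixel coordinates, letting element.location and element.size be
    # used directly as the crop box.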
def __getitem__(self, attr):
return self._element.get_attribute(attr)
|
cobrateam/splinter
|
splinter/driver/webdriver/__init__.py
|
Python
|
bsd-3-clause
| 32,153
|
[
"VisIt"
] |
0e2042aad155b8683c48c9b540afc306b07f2e84ed11cf9646f3495c613997b9
|
#
# Copyright (c) 2016 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function
from __future__ import absolute_import
from os.path import dirname
from os.path import abspath
from os.path import getsize
from os.path import getmtime
from os.path import join
from os.path import exists
from commoncode import fileutils
scan_src_dir = abspath(dirname(__file__))
src_dir = dirname(scan_src_dir)
root_dir = dirname(src_dir)
cache_dir = join(root_dir, '.cache')
scans_cache_dir = join(cache_dir, 'scan_results_caches')
if not exists(scans_cache_dir):
fileutils.create_dir(scans_cache_dir)
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('scancode-toolkit').version
except DistributionNotFound:
    # package is not installed; fall back to the hard-coded version below
__version__ = '2.0.0rc3'
|
yasharmaster/scancode-toolkit
|
src/scancode/__init__.py
|
Python
|
apache-2.0
| 2,135
|
[
"VisIt"
] |
057d3c12ba3258952d1c4df7cd11121b7d82d1f6e19f0c33afa6efce8b09e954
|
#!/usr/bin/env python2
#
# cmladvent.py -- CML2 configurator adventure-game front end
# by Eric S. Raymond, <esr@thyrsus.com>
#
# This illustrates how easy it is to wrap a front end around cmlsystem.
# Purely in the interests of science, of course...
#
import sys
if sys.version[0] < '2':
print "Python 2.0 or later is required for this program."
sys.exit(0)
import os, string, getopt, cmd, time, whrandom, random
import cml, cmlsystem
# Globals
debug = 0
proflog = partialsave = None
banner = ""
gruestate = darkturns = 0
lanternloc = None
configfile = None
configuration = None
directions = ('n','e','s','w','ne','sw','se','nw','dn','up')
# User-visible strings in the configurator. Separated out in order to
# support internationalization.
_eng = {
# Strings used in the command help -- these should align
"LHELP":"look [target] -- look here or at target (direction or option).",
"NHELP":"nearby -- list nearby rooms (useful with go)",
"GHELP":"go -- go to a named menu (follow with the label).",
"IHELP":"inventory -- show which options you have picked up.",
"THELP":"take [module] -- set options, follow with option names.",
"SETHELP":"set -- set numeric or string; follow with symbol and value.",
"DHELP":"drop -- unset options, follow with option names or `all'.",
"LDHELP":"load -- read in a configuration (follow with the filename).",
"SHELP":"save -- save the configuration (follow with a filename).",
"XYZZY":"xyzzy -- toggle suppression flag.",
"QHELP":"quit -- quit, discarding changes.",
"XHELP":"exit -- exit, saving the configuration.",
# Grue/lantern messages
"BRASSOFF":"A brass lantern (unlit).",
"BRASSON":"A brass lantern (lit).",
"DARK":"It is very dark. If you continue, you are likely to be eaten by a grue.",
"EATEN":"*CHOMP*! You have been eaten by a slavering grue. Game over.",
"GLOW":"The lantern radiates a mellow golden light.",
"LANTERN":"lantern",
"LANTERNDROP":"Lantern: dropped.",
"LANTERNTAKE":"Lantern: taken.",
"LONGLANTERN":"A brass lantern is here.",
"LANTERNHELP":"""
You see a brass lantern with a ring-shaped handle, hooded and paned with
clear glass. A toggle on the lamp connects to a firestriker inside it.
On the bottom is stamped a maker's mark that reads:
Another fine product of FrobozzCo.
Made in Plumbat, Great Underground Empire
""",
# Other strings
"ABORTED":"Configurator aborted.",
"BADOPTION":"cmladvent: unknown option on command line.\n",
"BOOLEAN":"`y' and `n' can only be applied to booleans or tristates",
"CANNOTSET":" Can't assign this value for bool or trit symbol.",
"CONSTRAINTS":"Constraints:",
"DEFAULT":"Default: ",
"DERIVED":"Symbol %s is derived and cannot be set.",
"DIRHELP":"You can move in compass directions n,e,w,s,ne,nw,se,sw, up, or dn for down.",
"DONE":"Done",
"DROPPED":"%s: dropped.",
"EFFECTS":"Side effects:",
"EH?":"Eh?",
"EXIT":"Exit",
"EXITS":"Passages exit up, %s.",
"EXTRAROOMS":"Other nearby rooms are: %s.",
"GOODBYE":"You emerge, blinking, into the daylight.",
"INROOM":"In %s room.",
"INVISIBLE":"Symbol is invisible",
"ISHERE":"There is an option named %s here.",
"LOADFAIL":"Loading '%s' failed, continuing...",
"MDISABLED":"Module-valued symbols are not enabled",
"MNOTVALID":" m is not a valid value for %s",
"NEW":"(NEW)",
"NNOTVALID":" n is not a valid value for %s",
"NOANCEST":"No ancestors.",
"NOBUTTON":"I don't see button %s here.",
"NOCMDLINE":"%s is the wrong type to be set from the command line",
"NODEPS":"No dependents.",
"NODIR":"You see nothing special in that direction.",
"NOFILE":"cmlconfigure: '%s' does not exist or is unreadable.",
"NOHAVE":"You don't have %s.",
"NOHELP":"No help available for %s",
"NOHERE":"I see no `%s' here.",
"NOMATCHES":"No matches.",
"NONEXIST":"No such location.",
"NOSUCHAS":"No such thing as",
"NOTSAVED":"Configuration not saved",
"NOWAY":"You can't go in that direction from here.",
"OUTOFBOUNDS":"Legal values are in %s",
"PARAMS":" Config = %s, prefix = %s",
"PASSAGEALL":"Passages lead off in all directions.",
"PASSAGEUP":"A passage leads upwards.",
"PHELP":"press -- press a button (follow with the button name).",
"POSTMORTEM":"The ruleset was inconsistent. A state dump is in the file `post.mortem'.",
"REALLY":"Really exit without saving?",
"ROLLBACK":"%s=%s would have violated these requirements:",
"ROOMBANNER":"The %s room. A sign reads `%s'.",
"SAVEAS":"Save As...",
"SAVEEND":"Done",
"SAVESTART":"Saving %s",
"SAVING":"Saving...",
"SHOW_ANC":"Show ancestors of symbol: ",
"SHOW_DEP":"Show dependents of symbol: ",
"SIDEEFFECTS":"Side Effects",
"SIDEFROM":"Side effects from %s:",
"SUPPRESSOFF":"Suppression turned off.",
"SUPPRESSON":"Suppression turned on.",
"SYMUNKNOWN":"cmlconfigure: unknown symbol %s\n",
"TAKEN":"%s: taken.",
"TRIT":"`m' can only be applied to tristates",
"TRYPRESS":"That doesn't work. You might try pressing another button.",
"TWISTY":"You are in a maze of twisty little %s menus, all different.",
"USESET":"What? Configure %s with your bare hands?",
"VALUE":"Value of %s is %s.",
"VISIBLE":"Symbol is visible.",
"VISIBILITY":"Visibility: ",
"WALLCHOICE":"There is a row of buttons on the wall of this room. They read:",
"WALLDEFAULT":"The button marked %s is pressed.",
"WELCOME":"Welcome to CML2 Adventure, version %s.",
# General help
"GENHELP":"""Welcome to the adventure configurator. For a command summary, type `commands'.
In general, a three-letter abbreviation of any command word is sufficient
to identify it to the parser.
This interface emulates the style of classic text adventure games such as
Colossal Cave Adventure and Zork. Configuration menus are rooms, and
configuration options are objects that can be taken and dropped (except
for choice/radiobutton symbols, which become buttons on various room walls).
Objects and rooms may silently appear and disappear as visibilities
change.
Have fun, and beware of the lurking grue!
"""
}
grafitti = (
'N tensvggb ernqf: "Gur Jhzchf jnf urer. Nera\'g lbh tynq ur\'f abg urer abj?"',
'N tensvggb ernqf: "Uryyb, fnvybe!"',
'N tensvggb ernqf: "Sebqb yvirf!"',
'N tensvggb ernqf: "Guvf fcnpr sbe erag."',
'N tensvggb ernqf: "Guvf Jnl gb gur Rterff..."',
# Bofpher Pbybffny Pnir Nqiragher ersreraprf ortva urer.
'Ba bar jnyy vf n tynff-sebagrq obk ubyqvat na nkr.\aBa gur tynff vf jevggra: "OERNX TYNFF VA PNFR BS QJNEIRF"',
'N tensvggb ernqf: "Srr svr sbr sbb!',
# Bofpher Mbex ersreraprf ortva urer.
'N tensvggb ernqf: "Ragunevba gur Jvfr fyrcg urer."',
'N tensvggb ernqf: "N mbexzvq fnirq vf n mbexzvq rnearq."',
'Bar jnyy qvfcynlf n sbezny cbegenvg bs W. Cvrecbag Syngurnq.',
'Bar jnyy qvfcynlf n qhfgl cbegenvg bs gur Rzcrebe Zhzob VV.',
'Bar jnyy qvfcynlf n cvpgher bs gur terng tenabyn fzrygref bs Cyhzong.',
'Bar jnyy qvfcynlf n gnpxl oynpx-iryirg cnvagvat bs n tbyqra-sheerq zvak jvgu uhtr rlrf.',
# Bofpher Q&Q ersreraprf ortva urer
'N tensvggb ernqf: "Vg pbhyq bayl or orggre ng Pnfgyr Terlunjx"',
'N tensvggb ernqf: "Cnenylfvf vf va gur rlr bs gur orubyqre"',
# Bofpher wbxr sbe QrPnzc/Cengg snaf
'N tensvggb ernqf: "Lativ vf n ybhfr!"',
# Abg-fb-bofpher Yvahk ersreraprf ortva urer.
'Ba bar jnyy vf n cubgbtencu bs Yvahf Gbeinyqf, qevaxvat Thvaarff.',
'N jnyy oenpxrg ubyqf n qvfpneqrq cnve bs Nyna Pbk\'f fhatynffrf. Oebamrq.',
'Ba bar jnyy vf n cbegenvg bs EZF va shyy Fg. Vtahpvhf qent.',
'Ba bar jnyy vf n cvpgher bs Yneel Jnyy ubyqvat n ynetr chzcxva.',
'Ba bar jnyy vf jung nccrnef gb or n cubgbtencu bs Thvqb\'f gvzr znpuvar.',
'Gur sybbe vf yvggrerq jvgu fcrag .45 furyyf. Revp Enlzbaq zhfg unir orra urer.',
)
grafittishuffle = []
grafitticount = 0
# Eventually, do more intelligent selection using LOCALE
lang = _eng
def roll(n):
"Return a random number in the range 0..n-1."
return random.randrange(n)
def shuffle(size):
"Generate a random permutation of 0...(size - 1)."
shuffle = range(size)
for i in range(1, size+1):
j = random.randrange(i)
holder = shuffle[i - 1]
shuffle[i - 1] = shuffle[j]
shuffle[j] = holder
return shuffle
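# shuffle() above is a hand-rolled Fisher-Yates permutation; on Pythons with
# random.shuffle the equivalent would be (illustrative sketch):
#
#     perm = range(size)
#     random.shuffle(perm)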
def rot13(str):
res = ""
for c in str:
if c in string.uppercase:
res += chr(ord('A') + ((ord(c)-ord('A')) + 13) % 26)
elif c in string.lowercase:
res += chr(ord('a') + ((ord(c)-ord('a')) + 13) % 26)
else:
res += c
return res
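# rot13 shifts each letter 13 places, so applying it twice is the identity.
# Example: rot13('Uryyb, fnvybe!') == 'Hello, sailor!'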
def newroom(room):
# There is a chance of grafitti
global grafitticount, grafittishuffle
if grafitticount < len(grafitti):
if not hasattr(room, "visits") and roll(3) == 0:
room.grafitti = grafitti[grafittishuffle[grafitticount]]
grafitticount += 1
# State machine for lantern and grue
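    # gruestate values: 0 = lantern not yet placed, 1 = lantern placed in a
    # room (or dropped), 2 = it is dark and the grue may strike, 3 = the
    # lantern has been taken and lit (set in do_light).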
global lanternloc, gruestate, darkturns
if gruestate == 0: # Initial state
if not hasattr(room, "visits") and roll(4) == 0:
gruestate += 1
lanternloc = room
elif gruestate == 1: # Lantern has been placed
if roll(4) == 0:
gruestate += 1
elif gruestate == 2: # It's dark now
darkturns += 1
if darkturns > 2 and roll(4) == 0:
print lang["EATEN"]
raise SystemExit
def visit(room, level=0):
"Visit a room, and describe at any of four verbosity levels."
# 0 = quiet, 1 = name only, 2 = name + help,
# 3 = name + help + exits, 4 = name + help + exits + contents
configuration.visit(room)
# Compute visible exits
room.exits = filter(lambda x: x.type in ("menu", "choices"), room.items)
room.exits = filter(configuration.is_visible, room.exits)
# This way of assigning directions has the defect that they may
# change as submenus become visible/invisible. Unfortunately,
# the alternative is not being able to assign directions at all
# for long menus.
room.directions = {}
for (dir,other) in zip(directions[:-1], room.exits):
room.directions[dir] = other
if level == 0:
return
elif level == 1:
print lang["INROOM"] % room.name
else:
print lang["ROOMBANNER"] % (room.name, room.prompt)
# Only display room exits at level 3 or up
if level >= 3:
if len(room.exits) > 9:
print lang["PASSAGEALL"]
elif room.exits:
print lang["EXITS"] % ", ".join(room.directions.keys())
elif room != configuration.start:
print lang["PASSAGEUP"]
print
# Display help at level 2 or up
help = room.help()
if help:
sys.stdout.write(help)
# Display grafitti at level 2 or up.
if hasattr(room, "grafitti"):
print rot13(room.grafitti) + "\n"
# Only display other contents of room at level 4 or up
if level >= 4:
if room.type == "choices":
print lang["WALLCHOICE"]
print ", ".join(map(lambda x:x.name, room.items))
print lang["WALLDEFAULT"] % room.menuvalue.name
else:
for symbol in room.items:
if symbol.is_symbol() and configuration.is_visible(symbol) and not symbol.eval():
print lang["ISHERE"] % symbol.name
# Some things are always shown
if lanternloc == room:
print lang["LONGLANTERN"]
if gruestate == 2:
print lang["DARK"]
def inventory():
# Write mutable symbols, including defaulted modular symbols.
configuration.module_suppress = 0
if lanternloc == 'user':
if gruestate == 3:
print lang["BRASSON"]
else:
print lang["BRASSOFF"]
__inventory_recurse(configuration.start)
if configuration.trit_tie:
configuration.module_suppress = (configuration.trit_tie.eval() == cml.m)
# Write all derived symbols
#config_sh.write(configuration.lang["SHDERIVED"])
#for entry in configuration.dictionary.values():
# if entry.is_derived():
# __inventory_recurse(entry, config_sh)
def __inventory_recurse(node):
if not configuration.saveable(node):
return
elif node.items:
for child in node.items:
__inventory_recurse(child)
elif node.type != 'message':
symname = configuration.prefix + node.name
value = node.eval(configuration.debug)
if not value or not node.setcount:
return
try:
if node.type == "decimal":
sys.stdout.write("%s=%d\n" % (symname, value))
elif node.type == "hexadecimal":
sys.stdout.write("%s=0x%x\n" % (symname, value))
elif node.type == "string":
sys.stdout.write("%s=\"%s\"\n" % (symname, value))
elif node.type in ("bool", "trit"):
sys.stdout.write("%s=%s\n" % (symname, `value`))
except:
(errtype, errval, errtrace) = sys.exc_info()
print "Internal error %s while writing %s." % (errtype, node)
raise SystemExit, 1
class advent_menu(cmd.Cmd):
"Adventure-game interface class."
def set_symbol(self, symbol, value, freeze=0):
"Set the value of a symbol -- line-oriented error messages."
if symbol.is_numeric() and symbol.range:
if not configuration.range_check(symbol, value):
print lang["OUTOFBOUNDS"] % (symbol.range,)
return 0
(ok, effects, violations) = configuration.set_symbol(symbol, value, freeze)
if effects:
print lang["EFFECTS"]
sys.stdout.write(string.join(effects, "\n") + "\n\n")
if not ok:
print lang["ROLLBACK"] % (symbol.name, value)
sys.stdout.write(string.join(violations, "\n") + "\n")
return ok
def __init__(self, myconfigfile=None, mybanner=""):
cmd.Cmd.__init__(self)
self.configfile = myconfigfile
if mybanner and configuration.banner.find("%s") > -1:
self.banner = configuration.banner % mybanner
elif banner:
self.banner = mybanner
else:
self.banner = configuration.banner
        self.current = configuration.start
self.prompt = "> "
print lang["TWISTY"]%(configuration.banner,)
self.last = None
visit(configuration.start, 4)
def do_look(self, line):
if not line: # Look at where we are
visit(self.current, 4)
elif line == "up": # Look up
if self.current == configuration.start:
print lang["NODIR"]
else:
visit(self.current.menu, 2)
elif line in directions: # Look in a direction
if line in self.current.directions.keys():
visit(self.current.directions[line], 2)
else:
print lang["NODIR"]
# Look at an option
elif line in map(lambda x: x.name, filter(lambda x: x.is_logical(), self.current.items)):
symbol = configuration.dictionary[line]
print lang["VALUE"] % (line, symbol.eval())
help = symbol.help()
if help:
sys.stdout.write(help)
else:
print lang["NOHERE"] % line
do_loo = do_look
def do_nearby(self, dummy):
if self.current != configuration.start:
print lang["ROOMBANNER"] % (self.current.menu.name, self.current.menu.prompt)
for (dir, symbol) in self.current.directions.items():
if symbol.type in ("menu", "choices") and configuration.is_visible(symbol):
print ("%-2s: " % dir) + lang["ROOMBANNER"] % (symbol.name, symbol.prompt)
if len(self.current.exits) > len(directions):
print lang["EXTRAROOMS"] % ", ".join(map(lambda x: x.name, self.current.exits[9:]))
print
do_nea = do_nearby
def do_go(self, symname):
if not symname:
print lang["EH?"]
return
symbol = configuration.dictionary.get(symname)
if symbol and symbol.type in ("menu", "choices"):
self.current = symbol
if not configuration.is_visible(self.current) and not self.current.frozen():
print lang["SUPPRESSOFF"]
                configuration.suppressions = 0
else:
print lang["NONEXIST"]
def do_dir(self, dir):
to = self.current.directions.get(dir)
if to:
self.current = to
else:
print lang["NOWAY"]
def do_n(self, dummy): self.do_dir('n')
def do_e(self, dummy): self.do_dir('e')
def do_w(self, dummy): self.do_dir('w')
def do_s(self, dummy): self.do_dir('s')
def do_ne(self, dummy): self.do_dir('ne')
def do_nw(self, dummy): self.do_dir('nw')
def do_se(self, dummy): self.do_dir('se')
def do_sw(self, dummy): self.do_dir('sw')
def do_u(self, dummy): self.do_up(dummy)
def do_d(self, dummy): self.do_dir('dn')
def do_up(self, dummy):
if self.current == configuration.start:
print lang["GOODBYE"]
raise SystemExit
else:
self.current = self.current.menu
def do_inventory(self, dummy):
inventory()
do_inv = do_inventory
do_i = do_inventory
def do_drop(self, line):
global lanternloc, gruestate
if not line:
print lang["EH?"]
return
words = line.lower().split()
if words == ["all"] and self.current.type != "choices":
words = map(lambda x:x.name, filter(lambda x:x.is_logical() and configuration.is_visible(x) and not x.eval(), self.current.items))
if lanternloc == 'user':
words.append(lang["LANTERN"])
for thing in words:
if thing == lang["LANTERN"]:
lanternloc = self.current
gruestate = 1
print lang["LANTERNDROP"]
else:
symbolname = thing.upper()
symbol = configuration.dictionary.get(symbolname)
if not symbol:
print lang["NOSUCHAS"], symbolname
continue
elif not symbol.eval():
print lang["NOHAVE"] % symbolname
continue
elif symbol.menu.type == "choices":
if symbol.menu != self.current:
print lang["NOBUTTON"] % symbolname
else:
print lang["TRYPRESS"]
return
elif symbol.is_logical():
ok = self.set_symbol(symbol, cml.n)
elif symbol.is_numeric():
ok = self.set_symbol(symbol, 0)
elif symbol.type == "string":
ok = self.set_symbol(symbol, "")
if ok:
print lang["DROPPED"] % symbol.name
do_dro = do_drop
def do_take(self, line):
global lanternloc
if not line:
print lang["EH?"]
return
words = line.lower().split()
if words == ["all"] and self.current.type != "choices":
words = map(lambda x:x.name, filter(lambda x:x.is_logical() and configuration.is_visible(x) and not x.eval(), self.current.items))
if lanternloc == self.current:
words.append(lang["LANTERN"])
if ("module" in words):
tritval = cml.m
words.remove("module")
else:
tritval = cml.y
for thing in words:
if thing == lang["LANTERN"]:
lanternloc = 'user'
print lang["LANTERNTAKE"]
else:
symbolname = thing.upper()
symbol = configuration.dictionary.get(symbolname)
if not symbol:
print lang["NOSUCHAS"], symbolname
elif symbol.menu != self.current:
print lang["NOHERE"] % symbol.name
elif symbol.is_logical():
if self.set_symbol(symbol, tritval):
print lang["TAKEN"] % symbol.name
else:
print lang["USESET"] % symbol.name
do_tak = do_take
def do_press(self, line):
if not line:
print lang["EH?"]
else:
symbol = configuration.dictionary.get(line)
if not symbol or symbol.menu != self.current:
print lang["NOHERE"] % line
else:
self.set_symbol(symbol, cml.y)
do_pus = do_push = do_pre = do_press
def do_light(self, dummy):
global gruestate
if lanternloc == 'user':
print lang["GLOW"]
gruestate = 3
else:
print lang["NOHERE"] % lang["LANTERN"]
do_lig = do_light
    def do_set(self, line):
        try:
            (symname, value) = line.split()
        except:
            print lang["EH?"]
            return
        symbol = configuration.dictionary.get(symname)
        if not symbol:
            print lang["NOSUCHAS"], symname
        elif symbol.menu != self.current:
            print lang["NOHERE"] % symbol.name
        elif symbol.menu.type == "choices" or symbol.is_logical():
            print lang["CANNOTSET"]
        elif symbol.is_numeric():
            self.set_symbol(symbol, int(value))
        elif symbol.type == "string":
            self.set_symbol(symbol, value)
def do_xyzzy(self, dummy):
# Toggle the suppressions flag
configuration.suppressions = not configuration.suppressions
if configuration.suppressions:
print lang["SUPPRESSON"]
else:
print lang["SUPPRESSOFF"]
return 0
def do_load(self, line):
if not line:
print lang["EH?"]
return
file = string.strip(line)
if file.find(' ') > -1:
(file, option) = file.split(' ')
try:
(changes, errors) = configuration.load(file, freeze=(option == "frozen"))
except IOError:
print lang["LOADFAIL"] % file
else:
if errors:
print errors
print lang["INCCHANGES"] % (changes,file)
if configuration.side_effects:
sys.stdout.write(string.join(configuration.side_effects, "\n") + "\n")
do_loa = do_load
def do_save(self, line):
if not line:
print lang["EH?"]
return
file = string.strip(line)
failure = configuration.save(file, cml.Baton(lang["SAVESTART"] % file, lang["SAVEEND"]))
if failure:
print failure
do_sav = do_save
def do_exit(self, dummy):
# Terminate this cmd instance, saving configuration
        self.do_save(configfile)
return 1
do_exi = do_exit
def do_quit(self, line):
# Terminate this cmd instance, not saving configuration
return 1
do_qui = do_quit
# Debugging commands -- not documented
def do_verbose(self, line):
# Set the debug flag
if not line:
configuration.debug += 1
else:
configuration.debug = int(line)
return 0
do_ver = do_verbose
def do_examine(self, line):
# Examine the state of a given symbol
symbol = string.strip(line)
if configuration.dictionary.has_key(symbol):
entry = configuration.dictionary[symbol]
print entry
if entry.constraints:
print lang["CONSTRAINTS"]
for wff in entry.constraints:
print cml.display_expression(wff)
if configuration.is_visible(entry):
print lang["VISIBLE"]
else:
print lang["INVISIBLE"]
help = entry.help()
if help:
print help
else:
print lang["NOHELP"] % (entry.name,)
elif symbol == "lantern":
if lanternloc == "user" or lanternloc == self.current:
print lang["LANTERNHELP"]
else:
print lang["NOHERE"] % lang["LANTERN"]
else:
print lang["NOSUCHAS"], symbol
return 0
do_exa = do_examine
def emptyline(self):
return 0
def do_commands(self, dummy):
print string.join(map(lambda x: lang[x],
("LHELP", "NHELP", "GHELP", "IHELP",
"DHELP", "THELP", "PHELP", "SETHELP",
"LDHELP", "SHELP", "XYZZY",
"QHELP", "XHELP", "DIRHELP")),
"\n")
def help_look(self):
print lang["LHELP"]
help_loo = help_look
def help_nearby(self):
print lang["NHELP"]
help_nea = help_nearby
def help_go(self):
print lang["GHELP"]
def help_inventory(self):
print lang["IHELP"]
help_inv = help_inventory
def help_drop(self):
print lang["DHELP"]
help_dro = help_drop
def help_take(self):
print lang["THELP"]
help_tak = help_take
def help_press(self):
print lang["PHELP"]
help_pus = help_push = help_pre = help_press
def help_set(self):
print lang["SETHELP"]
def help_xyzzy(self):
print lang["XYZZY"]
def help_load(self):
print lang["LDHELP"]
help_loa = help_load
def help_save(self):
print lang["SHELP"]
help_sav = help_save
def help_quit(self):
print lang["QHELP"]
help_qui = help_quit
def help_exit(self):
print lang["XHELP"]
help_exi = help_exit
def do_help(self, dummy):
print lang["GENHELP"]
def postcmd(self, stop, dummy):
if stop:
return stop
if self.current != self.last:
newroom(self.current)
visit(self.current, 4 - 3 * (self.current.visits > 1))
self.last = self.current
return None
# Rulebase loading and option processing
def load_system(cmd_options, cmd_arguments):
"Read in the rulebase and handle command-line arguments."
global debug, configfile, configuration
    debug = 0
configfile = None
if not cmd_arguments:
rulebase = "rules.out"
else:
rulebase = cmd_arguments[0]
try:
open(rulebase, 'rb')
except IOError:
print lang["NOFILE"] % (rulebase,)
raise SystemExit
configuration = cmlsystem.CMLSystem(rulebase)
process_options(configuration, cmd_options)
configuration.debug_emit(1, lang["PARAMS"] % (configfile,configuration.prefix))
# Perhaps the user needs modules enabled initially
if configuration.trit_tie and cml.evaluate(configuration.trit_tie):
configuration.trits_enabled = 1
# Don't count all these automatically generated settings
# for purposes of figuring out whether we should confirm a quit.
configuration.commits = 0
return configuration
def process_include(myconfiguration, file, freeze):
"Process a -i or -I inclusion option."
# Failure to find an include file is non-fatal
try:
(changes, errors) = myconfiguration.load(file, freeze)
except IOError:
print lang["LOADFAIL"] % file
return
if errors:
print errors
elif myconfiguration.side_effects:
print lang["SIDEFROM"] % file
sys.stdout.write(string.join(myconfiguration.side_effects, "\n") + "\n")
def process_define(myconfiguration, val, freeze):
"Process a -d=xxx or -D=xxx option."
parts = string.split(val, "=")
sym = parts[0]
if myconfiguration.dictionary.has_key(sym):
sym = myconfiguration.dictionary[sym]
else:
myconfiguration.errout.write(lang["SYMUNKNOWN"] % (`sym`,))
sys.exit(1)
if sym.is_derived():
myconfiguration.debug_emit(1, lang["DERIVED"] % (`sym`,))
sys.exit(1)
elif sym.is_logical():
if len(parts) == 1:
val = 'y'
elif parts[1] == 'y':
val = 'y'
elif parts[1] == 'm':
myconfiguration.trits_enabled = 1
val = 'm'
elif parts[1] == 'n':
val = 'n'
elif len(parts) == 1:
print lang["NOCMDLINE"] % (`sym`,)
sys.exit(1)
else:
val = parts[1]
(ok, effects, violations) = myconfiguration.set_symbol(sym,
myconfiguration.value_from_string(sym, val),
freeze)
if effects:
print lang["EFFECTS"]
sys.stdout.write(string.join(effects, "\n") + "\n\n")
if not ok:
print lang["ROLLBACK"] % (sym.name, val)
sys.stdout.write(string.join(violations,"\n")+"\n")
def process_options(myconfiguration, options):
# Process command-line options second so they override
global list, configfile, debug, banner
configfile = "config.out"
for (switch, val) in options:
if switch == '-B':
banner = val
elif switch == '-d':
process_define(myconfiguration, val, freeze=0)
elif switch == '-D':
process_define(myconfiguration, val, freeze=1)
elif switch == '-i':
process_include(myconfiguration, val, freeze=0)
elif switch == '-I':
process_include(myconfiguration, val, freeze=1)
elif switch == '-l':
list = 1
elif switch == '-o':
configfile = val
elif switch == '-v':
debug = debug + 1
myconfiguration.debug = myconfiguration.debug + 1
elif switch == '-S':
myconfiguration.suppressions = 0
# Main sequence -- isolated here so we can profile it
def main(options, arguments):
global configuration
try:
myconfiguration = load_system(options, arguments)
except KeyboardInterrupt:
raise SystemExit
# Set seed for random-number functions
whrandom.seed(int(time.time()) % 256, os.getpid() % 256, 23)
global grafittishuffle
grafittishuffle = shuffle(len(grafitti))
print lang["WELCOME"] % cml.version
myconfiguration.errout = sys.stdout
advent_menu(configfile, banner).cmdloop()
if __name__ == '__main__':
try:
runopts = "aB:cD:d:h:i:I:lo:P:R:StVvWx"
(options,arguments) = getopt.getopt(sys.argv[1:], runopts)
if os.environ.has_key("CML2OPTIONS"):
(envopts, envargs) = getopt.getopt(
os.environ["CML2OPTIONS"].split(),
runopts)
options = envopts + options
except:
print lang["BADOPTION"]
sys.exit(1)
proflog = None  # default when no -P option is given (avoids a NameError below)
for (switch, val) in options:
if switch == "-V":
print "cmladvent", cml.version
raise SystemExit
elif switch == '-P':
proflog = val
try:
import readline
except:
pass
try:
if proflog:
import profile, pstats
profile.run("main(options, arguments)", proflog)
else:
main(options, arguments)
except KeyboardInterrupt:
#if configuration.commits > 0:
# print lang["NOTSAVED"]
print lang["ABORTED"]
except "UNSATISFIABLE":
#configuration.save("post.mortem")
print lang["POSTMORTEM"]
raise SystemExit, 1
# That's all, folks!
|
jserv/codezero
|
tools/cml2-tools/cmladvent.py
|
Python
|
gpl-3.0
| 31,343
|
[
"VisIt"
] |
115acb0acbeec54135d376d561f8b07a1f96808b6177d23015856a3dab703663
|
""" StorageManagerHandler is the implementation of the StorageManagementDB in the DISET framework """
__RCSID__ = "$Id$"
from types import IntType, DictType, ListType, StringType, LongType
from DIRAC import gLogger, S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.StorageManagementSystem.DB.StorageManagementDB import StorageManagementDB
# This is a global instance of the StorageDB
storageDB = False
def initializeStorageManagerHandler( serviceInfo ):
global storageDB
storageDB = StorageManagementDB()
return storageDB._checkTable()
class StorageManagerHandler( RequestHandler ):
######################################################################
#
# Example call back methods
#
types_updateTaskStatus = []
def export_updateTaskStatus( self, sourceID, status, successful = [], failed = [] ):
""" An example to show the usage of the callbacks. """
gLogger.info( "updateTaskStatus: Received callback information for ID %s" % sourceID )
gLogger.info( "updateTaskStatus: Status = '%s'" % status )
if successful:
gLogger.info( "updateTaskStatus: %s files successfully staged" % len( successful ) )
for lfn, time in successful:
gLogger.info( "updateTaskStatus: %s %s" % ( lfn.ljust( 100 ), time.ljust( 10 ) ) )
if failed:
gLogger.info( "updateTaskStatus: %s files failed to stage" % len( failed ) )
for lfn, time in failed:
gLogger.info( "updateTaskStatus: %s %s" % ( lfn.ljust( 100 ), time.ljust( 10 ) ) )
return S_OK()
######################################################################
#
# Monitoring methods
#
types_getTaskStatus = [IntType]
def export_getTaskStatus( self, taskID ):
""" Obtain the status of the stage task from the DB. """
res = storageDB.getTaskStatus( taskID )
if not res['OK']:
gLogger.error( 'getTaskStatus: Failed to get task status', res['Message'] )
return res
types_getTaskInfo = [IntType]
def export_getTaskInfo( self, taskID ):
""" Obtain the metadata of the stage task from the DB. """
res = storageDB.getTaskInfo( taskID )
if not res['OK']:
gLogger.error( 'getTaskInfo: Failed to get task metadata', res['Message'] )
return res
types_getTaskSummary = [IntType]
def export_getTaskSummary( self, taskID ):
""" Obtain the summary of the stage task from the DB. """
res = storageDB.getTaskSummary( taskID )
if not res['OK']:
gLogger.error( 'getTaskSummary: Failed to get task summary', res['Message'] )
return res
types_getTasks = [DictType]
def export_getTasks( self, condDict, older = None, newer = None, timeStamp = 'LastUpdate', orderAttribute = None, limit = None ):
""" Get the tasks known to the DB. """
res = storageDB.getTasks( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit )
if not res['OK']:
gLogger.error( 'getTasks: Failed to get tasks', res['Message'] )
return res
types_removeStageRequests = [ListType]
def export_removeStageRequests( self, replicaIDs):
res = storageDB.removeStageRequests( replicaIDs )
if not res['OK']:
gLogger.error( 'removeStageRequests: Failed to remove StageRequests', res['Message'] )
return res
types_getCacheReplicas = [DictType]
def export_getCacheReplicas( self, condDict, older = None, newer = None, timeStamp = 'LastUpdate', orderAttribute = None, limit = None ):
""" Get the replicas known to the DB. """
res = storageDB.getCacheReplicas( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit )
if not res['OK']:
gLogger.error( 'getCacheReplicas: Failed to get Cache replicas', res['Message'] )
return res
types_getStageRequests = [DictType]
def export_getStageRequests( self, condDict, older = None, newer = None, timeStamp = 'StageRequestSubmitTime', orderAttribute = None, limit = None ):
""" Get the stage requests known to the DB. """
res = storageDB.getStageRequests( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit )
if not res['OK']:
gLogger.error( 'getStageRequests: Failed to get stage requests', res['Message'] )
return res
#
# Monitoring methods
#
######################################################################
####################################################################
#
# setRequest is used to initially insert tasks and their associated files. Leaves files in New status.
#
types_setRequest = [DictType, StringType, StringType, IntType]
def export_setRequest( self, lfnDict, source, callbackMethod, taskID ):
""" This method allows stage requests to be set into the StagerDB """
res = storageDB.setRequest( lfnDict, source, callbackMethod, taskID )
if not res['OK']:
gLogger.error( 'setRequest: Failed to set stage request', res['Message'] )
return res
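# A hypothetical client-side sketch of calling this endpoint over DISET
# (illustrative only -- the service path, callback name, and the exact
# lfnDict layout are assumptions, not taken from this file):
#
# from DIRAC.Core.DISET.RPCClient import RPCClient
# stager = RPCClient( 'StorageManagement/StorageManager' )
# lfnDict = { 'CERN-RAW' : [ '/lhcb/data/file1', '/lhcb/data/file2' ] }
# res = stager.setRequest( lfnDict, 'WorkloadManagement',
#                          'updateJobFromStager', 12345 )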
####################################################################
#
# The state transition of Replicas method
#
types_updateReplicaStatus = [ListType, StringType]
def export_updateReplicaStatus( self, replicaIDs, newReplicaStatus ):
""" This allows to update the status of replicas """
res = storageDB.updateReplicaStatus( replicaIDs, newReplicaStatus )
if not res['OK']:
gLogger.error( 'updateReplicaStatus: Failed to update replica status', res['Message'] )
return res
####################################################################
#
# The state transition of the Replicas from New->Waiting
#
types_updateReplicaInformation = [ListType]
def export_updateReplicaInformation( self, replicaTuples ):
""" This method sets the pfn and size for the supplied replicas """
res = storageDB.updateReplicaInformation( replicaTuples )
if not res['OK']:
gLogger.error( 'updateReplicaInformation: Failed to update replica information', res['Message'] )
return res
####################################################################
#
# The state transition of the Replicas from Waiting->StageSubmitted
#
types_getStagedReplicas = []
def export_getStagedReplicas( self ):
""" This method obtains the replicas for which all replicas in the task are Staged/StageSubmitted """
res = storageDB.getStagedReplicas()
if not res['OK']:
gLogger.error( 'getStagedReplicas: Failed to obtain Staged/StageSubmitted replicas', res['Message'] )
return res
types_getWaitingReplicas = []
def export_getWaitingReplicas( self ):
""" This method obtains the replicas for which all replicas in the task are Waiting """
res = storageDB.getWaitingReplicas()
if not res['OK']:
gLogger.error( 'getWaitingReplicas: Failed to obtain Waiting replicas', res['Message'] )
return res
types_getOfflineReplicas = []
def export_getOfflineReplicas( self ):
""" This method obtains the replicas for which all replicas in the task are Offline """
res = storageDB.getOfflineReplicas()
if not res['OK']:
gLogger.error( 'getOfflineReplicas: Failed to obtain Offline replicas', res['Message'] )
return res
types_getSubmittedStagePins = []
def export_getSubmittedStagePins( self ):
""" This method obtains the number of files and size of the requests submitted for each storage element """
res = storageDB.getSubmittedStagePins()
if not res['OK']:
gLogger.error( 'getSubmittedStagePins: Failed to obtain submitted request summary', res['Message'] )
return res
types_insertStageRequest = [DictType, [IntType, LongType]]
def export_insertStageRequest( self, requestReplicas, pinLifetime ):
""" This method inserts the stage request ID associated to the supplied replicaIDs """
res = storageDB.insertStageRequest( requestReplicas, pinLifetime )
if not res['OK']:
gLogger.error( 'insertStageRequest: Failed to insert stage request information', res['Message'] )
return res
####################################################################
#
# The state transition of the Replicas from StageSubmitted->Staged
#
types_setStageComplete = [ListType]
def export_setStageComplete( self, replicaIDs ):
""" This method updates the status of the stage request for the supplied replica IDs """
res = storageDB.setStageComplete( replicaIDs )
if not res['OK']:
gLogger.error( 'setStageComplete: Failed to set StageRequest complete', res['Message'] )
return res
####################################################################
#
# The methods for finalization of tasks
#
# Daniela: useless method
'''types_updateStageCompletingTasks = []
def export_updateStageCompletingTasks(self):
""" This method checks whether the file for Tasks in 'StageCompleting' status are all Staged and updates the Task status to Staged """
res = storageDB.updateStageCompletingTasks()
if not res['OK']:
gLogger.error('updateStageCompletingTasks: Failed to update StageCompleting tasks.',res['Message'])
return res
'''
types_setTasksDone = [ListType]
def export_setTasksDone( self, taskIDs ):
""" This method sets the status in the Tasks table to Done for the list of supplied task IDs """
res = storageDB.setTasksDone( taskIDs )
if not res['OK']:
gLogger.error( 'setTasksDone: Failed to set status of tasks to Done', res['Message'] )
return res
types_removeTasks = [ListType]
def export_removeTasks( self, taskIDs ):
""" This method removes the entries from TaskReplicas and Tasks with the supplied task IDs """
res = storageDB.removeTasks( taskIDs )
if not res['OK']:
gLogger.error( 'removeTasks: Failed to remove Tasks', res['Message'] )
return res
types_removeUnlinkedReplicas = []
def export_removeUnlinkedReplicas( self ):
""" This method removes Replicas which have no associated Tasks """
res = storageDB.removeUnlinkedReplicas()
if not res['OK']:
gLogger.error( 'removeUnlinkedReplicas: Failed to remove unlinked Replicas', res['Message'] )
return res
####################################################################
#
# The state transition of the Replicas from *->Failed
#
types_updateReplicaFailure = [DictType]
def export_updateReplicaFailure( self, replicaFailures ):
""" This method sets the status of the replica to failed with the supplied reason """
res = storageDB.updateReplicaFailure( replicaFailures )
if not res['OK']:
gLogger.error( 'updateReplicaFailure: Failed to update replica failure information', res['Message'] )
return res
####################################################################
#
# Methods for obtaining Tasks, Replicas with supplied state
#
types_getTasksWithStatus = [StringType]
def export_getTasksWithStatus( self, status ):
""" This method allows to retrieve Tasks with the supplied status """
res = storageDB.getTasksWithStatus( status )
if not res['OK']:
gLogger.error( 'getTasksWithStatus: Failed to get tasks with %s status' % status, res['Message'] )
return res
types_getReplicasWithStatus = [StringType]
def export_getReplicasWithStatus( self, status ):
""" This method allows to retrieve replicas with the supplied status """
res = storageDB.getCacheReplicas( {'Status':status} )
if not res['OK']:
gLogger.error( 'getReplicasWithStatus: Failed to get replicas with %s status' % status, res['Message'] )
return res
types_getStageSubmittedReplicas = []
def export_getStageSubmittedReplicas( self ):
""" This method obtains the replica metadata and the stage requestID for the replicas in StageSubmitted status """
res = storageDB.getCacheReplicas( {'Status':'StageSubmitted'} )
if not res['OK']:
gLogger.error( 'getStageSubmittedReplicas: Failed to obtain StageSubmitted replicas', res['Message'] )
return res
types_wakeupOldRequests = [ListType, IntType ]
def export_wakeupOldRequests( self, oldRequests, retryInterval ):
""" get only StageRequests with StageRequestSubmitTime older than 1 day AND that are still not staged
delete these requests
reset Replicas with corresponding ReplicaIDs to Status='New'
"""
res = storageDB.wakeupOldRequests( oldRequests, retryInterval )
if not res['OK']:
gLogger.error( 'wakeupOldRequests: Failed to wake up old requests', res['Message'] )
return res
types_setOldTasksAsFailed = [IntType]
def export_setOldTasksAsFailed( self, daysOld ):
"""
Set Tasks older than "daysOld" number of days to Failed
These tasks have already been retried every day for staging
"""
res = storageDB.setOldTasksAsFailed( daysOld )
if not res['OK']:
gLogger.error( 'setOldTasksAsFailed: Failed to set old Tasks to Failed state. ', res['Message'] )
return res
types_getAssociatedReplicas = [ListType]
def export_getAssociatedReplicas( self, replicaIDs ):
"""
Retrieve the list of Replicas that belong to the same Tasks as the provided list
"""
res = storageDB.getAssociatedReplicas( replicaIDs )
if not res['OK']:
gLogger.error( 'getAssociatedReplicas: Failed to get Associated Replicas. ', res['Message'] )
return res
types_killTasksBySourceTaskID = [ListType]
def export_killTasksBySourceTaskID(self, sourceTaskIDs ):
""" Given SourceTaskIDs (jobIDs), this will cancel further staging of files for the corresponding tasks"""
res = storageDB.killTasksBySourceTaskID( sourceTaskIDs )
if not res['OK']:
gLogger.error( 'killTasksBySourceTaskID: Failed to kill staging', res['Message'] )
return res
types_getCacheReplicasSummary = []
def export_getCacheReplicasSummary(self):
""" Reports breakdown of file number/size in different staging states across storage elements """
res = storageDB.getCacheReplicasSummary()
if not res['OK']:
gLogger.error( 'getCacheReplicasSummary: Failed to retrieve summary from server', res['Message'] )
return res
|
avedaee/DIRAC
|
StorageManagementSystem/Service/StorageManagerHandler.py
|
Python
|
gpl-3.0
| 14,253
|
[
"DIRAC"
] |
ed9bb57304f526753a9454383900ca953d5df83723ad7d7a1d674c3fbc5dc147
|
# Copyright(c) 2009, Gentoo Foundation
# Copyright: 2006-2008 Brian Harring <ferringb@gmail.com>
#
# License: GPL2/BSD
# $Header$
import unittest
try:
from test import test_support
except ImportError:
from test import support as test_support
from gentoolkit.atom import *
from gentoolkit.test import cmp
"""Atom test suite (verbatim) from pkgcore."""
class TestGentoolkitAtom(unittest.TestCase):
def assertEqual2(self, o1, o2):
# logic bugs hidden behind short circuiting comparisons for metadata
# is why we test the comparison *both* ways.
self.assertEqual(o1, o2)
c = cmp(o1, o2)
self.assertEqual(c, 0,
msg="checking cmp for %r, %r, aren't equal: got %i" % (o1, o2, c))
self.assertEqual(o2, o1)
c = cmp(o2, o1)
self.assertEqual(c, 0,
msg="checking cmp for %r, %r,aren't equal: got %i" % (o2, o1, c))
def assertNotEqual2(self, o1, o2):
# logic bugs hidden behind short circuiting comparisons for metadata
# is why we test the comparison *both* ways.
self.assertNotEqual(o1, o2)
c = cmp(o1, o2)
self.assertNotEqual(c, 0,
msg="checking cmp for %r, %r, not supposed to be equal, got %i"
% (o1, o2, c))
self.assertNotEqual(o2, o1)
c = cmp(o2, o1)
self.assertNotEqual(c, 0,
msg="checking cmp for %r, %r, not supposed to be equal, got %i"
% (o2, o1, c))
def test_comparison(self):
self.assertEqual2(Atom('cat/pkg'), Atom('cat/pkg'))
self.assertNotEqual2(Atom('cat/pkg'), Atom('cat/pkgb'))
self.assertNotEqual2(Atom('cata/pkg'), Atom('cat/pkg'))
self.assertNotEqual2(Atom('cat/pkg'), Atom('!cat/pkg'))
self.assertEqual2(Atom('!cat/pkg'), Atom('!cat/pkg'))
self.assertNotEqual2(Atom('=cat/pkg-0.1:0'),
Atom('=cat/pkg-0.1'))
self.assertNotEqual2(Atom('=cat/pkg-1[foon]'),
Atom('=cat/pkg-1'))
self.assertEqual2(Atom('=cat/pkg-0'), Atom('=cat/pkg-0'))
self.assertNotEqual2(Atom('<cat/pkg-2'), Atom('>cat/pkg-2'))
self.assertNotEqual2(Atom('=cat/pkg-2*'), Atom('=cat/pkg-2'))
# Portage Atom doesn't have 'negate_version' capability
#self.assertNotEqual2(Atom('=cat/pkg-2', True), Atom('=cat/pkg-2'))
# use...
self.assertNotEqual2(Atom('cat/pkg[foo]'), Atom('cat/pkg'))
self.assertNotEqual2(Atom('cat/pkg[foo]'),
Atom('cat/pkg[-foo]'))
self.assertEqual2(Atom('cat/pkg[foo,-bar]'),
Atom('cat/pkg[-bar,foo]'))
# repoid not supported by Portage Atom yet
## repoid
#self.assertEqual2(Atom('cat/pkg::a'), Atom('cat/pkg::a'))
#self.assertNotEqual2(Atom('cat/pkg::a'), Atom('cat/pkg::b'))
#self.assertNotEqual2(Atom('cat/pkg::a'), Atom('cat/pkg'))
# slots.
self.assertNotEqual2(Atom('cat/pkg:1'), Atom('cat/pkg'))
self.assertEqual2(Atom('cat/pkg:2'), Atom('cat/pkg:2'))
# http://dev.gentoo.org/~tanderson/pms/eapi-2-approved/pms.html#x1-190002.1.2
self.assertEqual2(Atom('cat/pkg:AZaz09+_.-'), Atom('cat/pkg:AZaz09+_.-'))
for lesser, greater in (('0.1', '1'), ('1', '1-r1'), ('1.1', '1.2')):
self.assertTrue(Atom('=d/b-%s' % lesser) <
Atom('=d/b-%s' % greater),
msg="d/b-%s < d/b-%s" % (lesser, greater))
self.assertFalse(Atom('=d/b-%s' % lesser) >
Atom('=d/b-%s' % greater),
msg="!: d/b-%s < d/b-%s" % (lesser, greater))
self.assertTrue(Atom('=d/b-%s' % greater) >
Atom('=d/b-%s' % lesser),
msg="d/b-%s > d/b-%s" % (greater, lesser))
self.assertFalse(Atom('=d/b-%s' % greater) <
Atom('=d/b-%s' % lesser),
msg="!: d/b-%s > d/b-%s" % (greater, lesser))
#self.assertTrue(Atom("!!=d/b-1", eapi=2) > Atom("!=d/b-1"))
self.assertTrue(Atom("!=d/b-1") < Atom("!!=d/b-1"))
self.assertEqual(Atom("!=d/b-1"), Atom("!=d/b-1"))
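# Doctest-style sketch of the ordering semantics exercised above
# (illustrative, derived from the assertions in this test):
# >>> Atom('=d/b-0.1') < Atom('=d/b-1')
# True
# >>> Atom('=d/b-1') < Atom('=d/b-1-r1')
# True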
def test_intersects(self):
for this, that, result in [
('cat/pkg', 'pkg/cat', False),
('cat/pkg', 'cat/pkg', True),
('cat/pkg:1', 'cat/pkg:1', True),
('cat/pkg:1', 'cat/pkg:2', False),
('cat/pkg:1', 'cat/pkg[foo]', True),
('cat/pkg[foo]', 'cat/pkg[-bar]', True),
('cat/pkg[foo]', 'cat/pkg[-foo]', False),
('>cat/pkg-3', '>cat/pkg-1', True),
('>cat/pkg-3', '<cat/pkg-3', False),
('>=cat/pkg-3', '<cat/pkg-3', False),
('>cat/pkg-2', '=cat/pkg-2*', True),
# Portage vercmp disagrees with this one:
#('<cat/pkg-2_alpha1', '=cat/pkg-2*', True),
('=cat/pkg-2', '=cat/pkg-2', True),
('=cat/pkg-3', '=cat/pkg-2', False),
('=cat/pkg-2', '>cat/pkg-2', False),
('=cat/pkg-2', '>=cat/pkg-2', True),
('~cat/pkg-2', '~cat/pkg-2', True),
('~cat/pkg-2', '~cat/pkg-2.1', False),
('=cat/pkg-2*', '=cat/pkg-2.3*', True),
('>cat/pkg-2.4', '=cat/pkg-2*', True),
('<cat/pkg-2.4', '=cat/pkg-2*', True),
('<cat/pkg-1', '=cat/pkg-2*', False),
('~cat/pkg-2', '>cat/pkg-2-r1', True),
('~cat/pkg-2', '<=cat/pkg-2', True),
('=cat/pkg-2-r2*', '<=cat/pkg-2-r20', True),
('=cat/pkg-2-r2*', '<cat/pkg-2-r20', True),
('=cat/pkg-2-r2*', '<=cat/pkg-2-r2', True),
('~cat/pkg-2', '<cat/pkg-2', False),
('=cat/pkg-1-r10*', '~cat/pkg-1', True),
('=cat/pkg-1-r1*', '<cat/pkg-1-r1', False),
('=cat/pkg-1*', '>cat/pkg-2', False),
('>=cat/pkg-8.4', '=cat/pkg-8.3.4*', False),
# Repos not yet supported by Portage
#('cat/pkg::gentoo', 'cat/pkg', True),
#('cat/pkg::gentoo', 'cat/pkg::foo', False),
('=sys-devel/gcc-4.1.1-r3', '=sys-devel/gcc-3.3*', False),
('=sys-libs/db-4*', '~sys-libs/db-4.3.29', True),
]:
this_atom = Atom(this)
that_atom = Atom(that)
self.assertEqual(
result, this_atom.intersects(that_atom),
'%s intersecting %s should be %s' % (this, that, result))
self.assertEqual(
result, that_atom.intersects(this_atom),
'%s intersecting %s should be %s' % (that, this, result))
def test_main():
test_support.run_unittest(TestGentoolkitAtom)
if __name__ == '__main__':
test_main()
|
djanderson/equery
|
pym/gentoolkit/test/test_atom.py
|
Python
|
gpl-2.0
| 5,622
|
[
"Brian"
] |
eb5b9d43137d566a54b35b3676dbab6bc05be4757249a77ddd1b7f02cbcbe03a
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGtrellis(RPackage):
"""Genome Level Trellis Layout
Genome level Trellis graph visualizes genomic data conditioned by
genomic categories (e.g. chromosomes). For each genomic category,
multiple dimensional data which are represented as tracks describe
different features from different aspects. This package provides high
flexibility to arrange genomic categories and to add self-defined
graphics in the plot."""
homepage = "https://bioconductor.org/packages/gtrellis"
git = "https://git.bioconductor.org/packages/gtrellis.git"
version('1.22.0', commit='c071c5631f3dedda212aed87d9c02954b5ed6611')
version('1.16.1', commit='a9003ededc8f2a48c78d4545e2f214023c13a7da')
version('1.14.0', commit='93935fb34211d12b250e22291712e18a31b0208d')
version('1.12.1', commit='7f3941adddbbfa17f4cf474b703568678a38272d')
version('1.11.1', commit='ff47d99743fd697d5c724f7bb18131dfe76dee71')
version('1.8.0', commit='f813b420a008c459f63a2a13e5e64c5507c4c472')
depends_on('r@3.1.2:', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-circlize@0.3.3:', type=('build', 'run'))
depends_on('r-circlize@0.4.8:', when='@1.16.1', type=('build', 'run'))
depends_on('r-getoptlong', type=('build', 'run'))
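# Typical command-line usage (assuming a working Spack installation):
#
#     spack install r-gtrellis@1.22.0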
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-gtrellis/package.py
|
Python
|
lgpl-2.1
| 1,599
|
[
"Bioconductor"
] |
f186c07b023420e00f484bfcd2ffb0c5622ae4a2ba7c49ac8a7de0aae39bd2cd
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2014 Jonathan F. Donges
# Author: Jonathan F. Donges <donges@pik-potsdam.de>
# URL: <http://www.pik-potsdam.de/members/donges/software>
"""
Computes auto-correlation function for irregularly sampled time series.
Uses the method proposed in:
Rehfeld, K., Marwan, N., Heitzig, J., & Kurths, J. (2011). Comparison of correlation analysis techniques for irregularly sampled time series. Nonlinear Processes in Geophysics, 18(3), 389-404.
This script provides analyses for this publication:
J.F. Donges, R.V. Donner, N. Marwan, S.F.M. Breitenbach, K. Rehfeld, and J. Kurths,
Nonlinear regime shifts in Holocene Asian monsoon variability: Potential impacts on cultural change and migratory patterns,
Climate of the Past 11, 709-741 (2015),
DOI: 10.5194/cp-11-709-2015
"""
#
# Imports
#
import sys
import numpy as np
import pylab
import progressbar
#
# Settings
#
# Filename
FILENAME_X = "../../data/raw_proxy_data/Dongge_DA.dat"
# Resolution of cross-correlation (units of time)
DELTA_LAG = 10 # Measured in years here
# Maximum lag index
MAX_LAG_INDEX = 100
# Toggle detrending
DETRENDING = True
DETRENDING_WINDOW_SIZE = 1000. # Measured in years here
#
# Functions
#
def detrend_time_series(time, data, window_size):
# Get length of data array
n = data.shape[0]
# Initialize a local copy of data array
detrended_data = np.empty(n)
# Detrend data
for j in xrange(n):
# Get lower and upper bound of window in time domain
lower_bound = time[j] - window_size / 2.
upper_bound = time[j] + window_size / 2.
# Get time indices lying within the window
window_indices = np.logical_and(time >= lower_bound, time <= upper_bound)
# Subtract window mean from data point in the center
detrended_data[j] = data[j] - data[window_indices].mean()
return detrended_data
def gaussian(x, std):
"""
Returns value of gaussian distribution at x with 0 mean
and standard deviation std.
"""
return 1 / np.sqrt(2 * np.pi * std ** 2) * np.exp(-x ** 2 / (2 * std ** 2))
def kernel_auto_correlation_est(x, time_diff, kernel_func, kernel_param,
delta_lag, max_lag_index):
"""
Estimates auto correlation using a kernel function.
"""
# Normalize time series
x -= x.mean()
x /= x.std()
# Initialize discrete auto-correlation function
auto_correlation = np.zeros(max_lag_index + 1)
# Loop over all positive lags and zero lag
for k in xrange(max_lag_index + 1):
# Calculate b matrix
b = kernel_func(k * delta_lag - time_diff, kernel_param)
# Calculate numerator
numerator = np.dot(x, np.dot(b, x.transpose()))
# Calculate denominator
denominator = b.sum()
# Calculate auto-correlation
auto_correlation[k] = numerator / denominator
lag_times = delta_lag * np.arange(max_lag_index + 1)
return (lag_times, auto_correlation)
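# A minimal, self-contained sketch of calling the estimator on synthetic,
# irregularly sampled data (illustrative only; all names are local to this
# comment):
#
# t = np.sort(np.random.uniform(0., 2000., 300))  # irregular sampling times
# y = np.sin(2 * np.pi * t / 500.) + 0.1 * np.random.randn(300)
# dt = t[:, None] - t[None, :]  # pairwise time differences
# lags, acf = kernel_auto_correlation_est(y.copy(), dt, gaussian,
#                                         0.25 * np.diff(t).mean(), 10., 50)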
#
# Main script
#
# Load record x
data_x = np.loadtxt(FILENAME_X, unpack=False, usecols=(0,1,), comments="#")
#data_x = np.fromfile(FILENAME_X, sep=" ")
time_x = data_x[:,0]
x = data_x[:,1]
# Detrending of time series using moving window averages
if DETRENDING:
x = detrend_time_series(time_x, x, DETRENDING_WINDOW_SIZE)
# Get length of records
N_x = len(time_x)
# Get recommended standard deviation of gaussian Kernel (Kira Rehfeld's
# NPG paper)
sigma = 0.25 * np.diff(time_x).mean()
print "Length of record x:", N_x
print "Mean sampling time x:", np.diff(time_x).mean()
print "Recommended standard deviation of gaussian Kernel:", sigma
# Calculate matrix of time differences
time_diff = np.zeros((N_x, N_x))
for i in xrange(N_x):
for j in xrange(N_x):
time_diff[i,j] = time_x[i] - time_x[j]
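# Note: the double loop above is equivalent to the vectorized form
# time_diff = np.subtract.outer(time_x, time_x)
# which is considerably faster for long records.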
# Estimate auto-correlation function
(lag_times, auto_correlation) = kernel_auto_correlation_est(x=x.copy(), time_diff=time_diff, kernel_func=gaussian, kernel_param=sigma, delta_lag=DELTA_LAG, max_lag_index=MAX_LAG_INDEX)
#
# Save results
#
results = np.zeros((MAX_LAG_INDEX + 1, 2))
results[:,0] = lag_times
results[:,1] = auto_correlation
np.savetxt("kernel_acf_dongge.txt", results)
#
# Plot results
#
# Set plotting parameters (for Clim. Past paper)
params = { 'figure.figsize': (6.,6.),
'axes.labelsize': 12,
'text.fontsize': 12,
'xtick.labelsize': 12,
'ytick.labelsize': 12,
'legend.fontsize': 10,
'title.fontsize': 12,
'text.usetex': False,
'font': 'Helvetica',
'mathtext.bf': 'helvetica:bold',
'xtick.major.pad': 6,
'ytick.major.pad': 6,
'xtick.major.size': 5,
'ytick.major.size': 5,
'tick.labelsize': 'small'
}
#pylab.rcParams.update(params)
# Plot time series
pylab.figure(1)
pylab.plot(time_x, x)
pylab.xlabel("Age (y B.P.)")
pylab.ylabel("Normalized values")
pylab.figure(2)
pylab.plot(lag_times, auto_correlation, "k")
pylab.axhline(y=1 / np.e, color="red")
pylab.xlabel("Time delay [y]")
pylab.ylabel("ACF")
pylab.ylim(-0.5,1)
pylab.savefig("auto_corr_irregular.pdf")
pylab.show()
|
pik-copan/pyregimeshifts
|
scripts/auto_correlation_function/auto_correlation_irregular_sampling.py
|
Python
|
mit
| 5,228
|
[
"Gaussian"
] |
132a64eb261032bd9b39e90e0fd7b7096162b5cecb94901950a8bbc1be1fb86d
|
# (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
A package for handling multi-dimensional data and associated metadata.
.. note ::
The Iris documentation has further usage information, including
a :ref:`user guide <user_guide_index>` which should be the first port of
call for new users.
The functions in this module provide the main way to load and/or save
your data.
The :func:`load` function provides a simple way to explore data from
the interactive Python prompt. It will convert the source data into
:class:`Cubes <iris.cube.Cube>`, and combine those cubes into
higher-dimensional cubes where possible.
The :func:`load_cube` and :func:`load_cubes` functions are similar to
:func:`load`, but they raise an exception if the number of cubes is not
what was expected. They are more useful in scripts, where they can
provide an early sanity check on incoming data.
The :func:`load_raw` function is provided for those occasions where the
automatic combination of cubes into higher-dimensional cubes is
undesirable. However, it is intended as a tool of last resort! If you
experience a problem with the automatic combination process then please
raise an issue with the Iris developers.
To persist a cube to the file-system, use the :func:`save` function.
All the load functions share very similar arguments:
* uris:
Either a single filename/URI expressed as a string, or an
iterable of filenames/URIs.
Filenames can contain `~` or `~user` abbreviations, and/or
Unix shell-style wildcards (e.g. `*` and `?`). See the
standard library function :func:`os.path.expanduser` and
module :mod:`fnmatch` for more details.
* constraints:
Either a single constraint, or an iterable of constraints.
Each constraint can be either a string, an instance of
:class:`iris.Constraint`, or an instance of
:class:`iris.AttributeConstraint`. If the constraint is a string
it will be used to match against cube.name().
.. _constraint_egs:
For example::
# Load air temperature data.
load_cube(uri, 'air_temperature')
# Load data with a specific model level number.
load_cube(uri, iris.Constraint(model_level_number=1))
# Load data with a specific STASH code.
load_cube(uri, iris.AttributeConstraint(STASH='m01s00i004'))
* callback:
A function to add metadata from the originating field and/or URI which
obeys the following rules:
1. Function signature must be: ``(cube, field, filename)``.
2. Modifies the given cube inplace, unless a new cube is
returned by the function.
3. If the cube is to be rejected the callback must raise
an :class:`iris.exceptions.IgnoreCubeException`.
For example::
def callback(cube, field, filename):
# Extract ID from filenames given as: <prefix>__<exp_id>
experiment_id = filename.split('__')[1]
experiment_coord = iris.coords.AuxCoord(
experiment_id, long_name='experiment_id')
cube.add_aux_coord(experiment_coord)
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import contextlib
import glob
import itertools
import logging
import os.path
import threading
import iris.config
import iris.cube
import iris._constraints
from iris._deprecation import IrisDeprecation, warn_deprecated
import iris.fileformats
import iris.io
try:
import iris_sample_data
except ImportError:
iris_sample_data = None
# Iris revision.
__version__ = '1.10.0-DEV'
# Restrict the names imported when using "from iris import *"
__all__ = ['load', 'load_cube', 'load_cubes', 'load_raw',
'save', 'Constraint', 'AttributeConstraint', 'sample_data_path',
'site_configuration', 'Future', 'FUTURE',
'IrisDeprecation']
# When required, log the usage of Iris.
if iris.config.IMPORT_LOGGER:
logging.getLogger(iris.config.IMPORT_LOGGER).info('iris %s' % __version__)
Constraint = iris._constraints.Constraint
AttributeConstraint = iris._constraints.AttributeConstraint
class Future(threading.local):
"""Run-time configuration controller."""
def __init__(self, cell_datetime_objects=False, netcdf_promote=False,
strict_grib_load=False, netcdf_no_unlimited=False,
clip_latitudes=False):
"""
A container for run-time options controls.
To adjust the values simply update the relevant attribute from
within your code. For example::
iris.FUTURE.cell_datetime_objects = True
If Iris code is executed with multiple threads, note the values of
these options are thread-specific.
The option `cell_datetime_objects` controls whether the
:meth:`iris.coords.Coord.cell()` method returns time coordinate
values as simple numbers or as time objects with attributes for
year, month, day, etc. In particular, this allows one to express
certain time constraints using a simpler, more transparent
syntax, such as::
# To select all data defined at midday.
Constraint(time=lambda cell: cell.point.hour == 12)
# To ignore the 29th of February.
Constraint(time=lambda cell: cell.point.day != 29 and
cell.point.month != 2)
For more details, see :ref:`using-time-constraints`.
The option `netcdf_promote` controls whether the netCDF loader
will expose variables which define reference surfaces for
dimensionless vertical coordinates as independent Cubes.
The option `strict_grib_load` controls whether GRIB files are
loaded as Cubes using a new template-based conversion process.
This new conversion process will raise an exception when it
encounters a GRIB message which uses a template not supported
by the conversion.
The option `netcdf_no_unlimited`, when True, changes the
behaviour of the netCDF saver, such that no dimensions are set to
unlimited. The current default is that the leading dimension is
unlimited unless otherwise specified.
The option `clip_latitudes` controls whether the
:meth:`iris.coords.Coord.guess_bounds()` method limits the
guessed bounds to [-90, 90] for latitudes.
"""
self.__dict__['cell_datetime_objects'] = cell_datetime_objects
self.__dict__['netcdf_promote'] = netcdf_promote
self.__dict__['strict_grib_load'] = strict_grib_load
self.__dict__['netcdf_no_unlimited'] = netcdf_no_unlimited
self.__dict__['clip_latitudes'] = clip_latitudes
def __repr__(self):
msg = ('Future(cell_datetime_objects={}, netcdf_promote={}, '
'strict_grib_load={}, netcdf_no_unlimited={}, '
'clip_latitudes={})')
return msg.format(self.cell_datetime_objects, self.netcdf_promote,
self.strict_grib_load, self.netcdf_no_unlimited,
self.clip_latitudes)
def __setattr__(self, name, value):
if name not in self.__dict__:
msg = "'Future' object has no attribute {!r}".format(name)
raise AttributeError(msg)
self.__dict__[name] = value
@contextlib.contextmanager
def context(self, **kwargs):
"""
Return a context manager which allows temporary modification of
the option values for the active thread.
On entry to the `with` statement, all keyword arguments are
applied to the Future object. On exit from the `with`
statement, the previous state is restored.
For example::
with iris.FUTURE.context():
iris.FUTURE.cell_datetime_objects = True
# ... code which expects time objects
Or more concisely::
with iris.FUTURE.context(cell_datetime_objects=True):
# ... code which expects time objects
"""
# Save the current context
current_state = self.__dict__.copy()
# Update the state
for name, value in six.iteritems(kwargs):
setattr(self, name, value)
try:
yield
finally:
# Return the state
self.__dict__.clear()
self.__dict__.update(current_state)
#: Object containing all the Iris run-time options.
FUTURE = Future()
# Initialise the site configuration dictionary.
#: Iris site configuration dictionary.
site_configuration = {}
try:
from iris.site_config import update as _update
except ImportError:
pass
else:
_update(site_configuration)
def _generate_cubes(uris, callback, constraints):
"""Returns a generator of cubes given the URIs and a callback."""
if isinstance(uris, six.string_types):
uris = [uris]
# Group collections of uris by their iris handler
# Create list of tuples relating schemes to part names
uri_tuples = sorted(iris.io.decode_uri(uri) for uri in uris)
for scheme, groups in (itertools.groupby(uri_tuples, key=lambda x: x[0])):
# Call each scheme handler with the appropriate URIs
if scheme == 'file':
part_names = [x[1] for x in groups]
for cube in iris.io.load_files(part_names, callback, constraints):
yield cube
elif scheme in ['http', 'https']:
urls = [':'.join(x) for x in groups]
for cube in iris.io.load_http(urls, callback):
yield cube
else:
raise ValueError('Iris cannot handle the URI scheme: %s' % scheme)
def _load_collection(uris, constraints=None, callback=None):
try:
cubes = _generate_cubes(uris, callback, constraints)
result = iris.cube._CubeFilterCollection.from_cubes(cubes, constraints)
except EOFError as e:
raise iris.exceptions.TranslationError(
"The file appears empty or incomplete: {!r}".format(str(e)))
return result
def load(uris, constraints=None, callback=None):
"""
Loads any number of Cubes for each constraint.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs.
Kwargs:
* constraints:
One or more constraints.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.CubeList`.
"""
return _load_collection(uris, constraints, callback).merged().cubes()
def load_cube(uris, constraint=None, callback=None):
"""
Loads a single cube.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs.
Kwargs:
* constraint:
A constraint.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.Cube`.
"""
constraints = iris._constraints.list_of_constraints(constraint)
if len(constraints) != 1:
raise ValueError('only a single constraint is allowed')
cubes = _load_collection(uris, constraints, callback).merged().cubes()
try:
cube = cubes.merge_cube()
except iris.exceptions.MergeError as e:
raise iris.exceptions.ConstraintMismatchError(str(e))
except ValueError:
raise iris.exceptions.ConstraintMismatchError('no cubes found')
return cube
def load_cubes(uris, constraints=None, callback=None):
"""
Loads exactly one Cube for each constraint.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs.
Kwargs:
* constraints:
One or more constraints.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.CubeList`.
"""
# Merge the incoming cubes
collection = _load_collection(uris, constraints, callback).merged()
# Make sure we have exactly one merged cube per constraint
bad_pairs = [pair for pair in collection.pairs if len(pair) != 1]
if bad_pairs:
fmt = ' {} -> {} cubes'
bits = [fmt.format(pair.constraint, len(pair)) for pair in bad_pairs]
msg = '\n' + '\n'.join(bits)
raise iris.exceptions.ConstraintMismatchError(msg)
return collection.cubes()
def load_raw(uris, constraints=None, callback=None):
"""
Loads non-merged cubes.
This function is provided for those occasions where the automatic
combination of cubes into higher-dimensional cubes is undesirable.
However, it is intended as a tool of last resort! If you experience
a problem with the automatic combination process then please raise
an issue with the Iris developers.
For a full description of the arguments, please see the module
documentation for :mod:`iris`.
Args:
* uris:
One or more filenames/URIs.
Kwargs:
* constraints:
One or more constraints.
* callback:
A modifier/filter function.
Returns:
An :class:`iris.cube.CubeList`.
"""
return _load_collection(uris, constraints, callback).cubes()
save = iris.io.save
def sample_data_path(*path_to_join):
"""
Given the sample data resource, returns the full path to the file.
.. note::
This function is only for locating files in the iris sample data
collection (installed separately from iris). It is not needed or
appropriate for general file access.
"""
target = os.path.join(*path_to_join)
if os.path.isabs(target):
raise ValueError('Absolute paths, such as {!r}, are not supported.\n'
'NB. This function is only for locating files in the '
'iris sample data collection. It is not needed or '
'appropriate for general file access.'.format(target))
if iris_sample_data is not None:
target = os.path.join(iris_sample_data.path, target)
else:
wmsg = ("iris.config.SAMPLE_DATA_DIR was deprecated in v1.10.0 and "
"will be removed in a future Iris release. Install the "
"'iris_sample_data' package.")
warn_deprecated(wmsg)
target = os.path.join(iris.config.SAMPLE_DATA_DIR, target)
if not glob.glob(target):
raise ValueError('Sample data file(s) at {!r} not found.\n'
'NB. This function is only for locating files in the '
'iris sample data collection. It is not needed or '
'appropriate for general file access.'.format(target))
return target
|
ehogan/iris
|
lib/iris/__init__.py
|
Python
|
lgpl-3.0
| 15,619
|
[
"NetCDF"
] |
6aed8871275c245f4eb26310fdc54701373494c354cbbb7fda6e5bd961cc1ebc
|
""" Class Distribution
This class implements a distribution object that is defined by its pdf
(probability density function)
Interestingly, I could not find in numpy/scipy a class that could implement a
distribution just from its pdf. The idea of such object is to be able to
compute statistics of this distribution without any pain.
Also this class implements basic operations such as + - / *, with scalars or
distributions, which comes handy when doing probabilities. The normalization
is left to the user's decision but can be quickly done using the normalize()
method
Note that operations with scalars requires to write the distribution on the
left side of the operators.
Implementation notes:
* the pdf is given by a linear interpolation of the samples,
* the pdf's norm is given by a scipy.integrate.simps integration (fast and robust)
* the cdf is given by the linear interpolation of the cumulative sum of the pdf samples.
* percentiles are calculated directly by bracketing the cdf and from linear interpolations
"""
from __future__ import print_function
import inspect
import numpy as np
from scipy.integrate import simps
class Distribution(object):
def __init__(self, x, pdf, name=None, *args, **kwargs):
if len(x) != len(pdf):
raise ValueError('x and pdf must have the same length')
ind = np.argsort(x)
self._pdf = np.asarray(pdf)[ind]
self._x = np.asarray(x)[ind]
self.norm = simps(self._pdf, self._x)
self.name = name
def pdf(self, x):
"""Probability density function"""
return np.interp(x, self._x, self._pdf) / self.norm
def cdf(self, x):
"""Cumulative distribution function"""
xp = self._x
fp = np.cumsum(self._pdf)
return np.interp(x, xp, fp) / self.norm
def sf(self, x):
"""Survival function = complementary CDF"""
return 1. - self.cdf(x)
def ppf(self, x):
"""Percentile point function (i.e. CDF inverse)"""
data = self._x
weights = self._pdf / self.norm
percentiles = np.clip(x * 100., 0., 100.)
# check if actually weighted percentiles is needed
if weights is None:
return np.percentile(data, list(percentiles))
if np.equal(weights, 1.).all():
return np.percentile(data, list(percentiles))
# make sure percentiles are fractions between 0 and 1
if not np.greater_equal(percentiles, 0.0).all():
raise ValueError("Percentiles less than 0")
if not np.less_equal(percentiles, 100.0).all():
raise ValueError("Percentiles greater than 100")
#Make sure data is in correct shape
shape = np.shape(data)
n = len(data)
if (len(shape) != 1):
raise ValueError("wrong data shape, expecting 1d")
if len(weights) != n:
raise ValueError("weights must be the same shape as data")
if not np.greater_equal(weights, 0.0).all():
raise ValueError("Not all weights are non-negative.")
_data = np.asarray(data, dtype=float)
if hasattr(percentiles, '__iter__'):
_p = np.asarray(percentiles, dtype=float) * 0.01
else:
_p = np.asarray([percentiles * 0.01], dtype=float)
_wt = np.asarray(weights, dtype=float)
len_p = len(_p)
sd = np.empty(n, dtype=float)
sw = np.empty(n, dtype=float)
aw = np.empty(n, dtype=float)
o = np.empty(len_p, dtype=float)
i = np.argsort(_data)
np.take(_data, i, axis=0, out=sd)
np.take(_wt, i, axis=0, out=sw)
np.add.accumulate(sw, out=aw)
if not aw[-1] > 0:
raise ValueError("Nonpositive weight sum")
w = (aw - 0.5 * sw) / aw[-1]
spots = np.searchsorted(w, _p)
for (pk, s, p) in zip(range(len_p), spots, _p):
if s == 0:
o[pk] = sd[0]
elif s == n:
o[pk] = sd[n - 1]
else:
f1 = (w[s] - p) / (w[s] - w[s - 1])
f2 = (p - w[s - 1]) / (w[s] - w[s - 1])
assert (f1 >= 0) and (f2 >= 0) and (f1 <= 1 ) and (f2 <= 1)
assert abs(f1 + f2 - 1.0) < 1e-6
o[pk] = sd[s - 1] * f1 + sd[s] * f2
return o
def isf(self, x):
"""Inverse survival function (Complementary CDF inverse)"""
return self.ppf(1. - x)
@property
def mean(self):
e = (self._x * self._pdf).sum() / self._pdf.sum()
return e
@property
def variance(self):
m = self.mean
e = ( (self._x - m) ** 2 * self._pdf).sum() / self._pdf.sum()
return e
@property
def std(self):
return np.sqrt(self.variance)
@property
def skew(self):
return self.moment(3, reduced=True)
@property
def kurtosis(self):
return self.moment(4, reduced=True)
def moment(self, order, reduced=False):
"""Central moments (standardized when reduced=True)"""
X = self._x - self.mean
if reduced is True:
X /= self.std
e = ( X ** order * self._pdf).sum() / self._pdf.sum()
return e
def rvs(self, N):
"""Random samples"""
x = np.random.uniform(0., 1., N)
return self.ppf(x)
def normalize(self):
""" Normalize the sampled pdf by its norm """
self._pdf = self._pdf / self.norm
self.norm = 1.0
def __add__(self, other):
""" Sum of distributions """
if np.isscalar(other):
name = '{:s} + {}'.format(self.name, other)
return Distribution(self._x, self._pdf + other, name=name)
elif isinstance(other, Distribution):
x0 = self._x
pdf0 = self._pdf
x1 = other._x
pdf1 = other._pdf
x = np.unique(np.hstack([x0, x1]))
y0 = np.interp(x, x0, pdf0)
y1 = np.interp(x, x1, pdf1)
name = '({:s}) + ({:s})'.format(self.name, other.name)
return Distribution(x, y0 + y1, name=name)
elif hasattr(other, '__call__'):
x0 = self._x
y0 = self._pdf
y1 = other(x0)
n1 = getattr(other, '__name__', 'f(...)')
if n1 == '<lambda>':
t = inspect.getsource(other).replace(' ', '')[:-1]
t = ''.join(t.split('lambda')[1:]).split(':')
n1 = 'f({t[0]}) = {t[1]}'.format(t=t)
name = '({:s}) + ({:s})'.format(self.name, n1)
return Distribution(x0, y0 + y1, name=name)
def __sub__(self, other):
""" Subtract distribution """
if np.isscalar(other):
name = '{:s} - {}'.format(self.name, other)
return Distribution(self._x, self._pdf - other, name=name)
elif isinstance(other, Distribution):
x0 = self._x
pdf0 = self._pdf
x1 = other._x
pdf1 = other._pdf
x = np.unique(np.hstack([x0, x1]))
y0 = np.interp(x, x0, pdf0)
y1 = np.interp(x, x1, pdf1)
name = '({:s}) - ({:s})'.format(self.name, other.name)
return Distribution(x, y0 - y1, name=name)
elif hasattr(other, '__call__'):
x0 = self._x
y0 = self._pdf
y1 = other(x0)
n1 = getattr(other, '__name__', 'f(...)')
if n1 == '<lambda>':
t = inspect.getsource(other).replace(' ', '')[:-1]
t = ''.join(t.split('lambda')[1:]).split(':')
n1 = 'f({t[0]}) = {t[1]}'.format(t=t)
name = '({:s}) - ({:s})'.format(self.name, n1)
return Distribution(x0, y0 - y1, name=name)
def __mul__(self, other):
""" multiply distribution """
if np.isscalar(other):
name = '{1} * {0:s}'.format(self.name, other)
return Distribution(self._x, self._pdf * other, name=name)
elif isinstance(other, Distribution):
x0 = self._x
pdf0 = self._pdf
x1 = other._x
pdf1 = other._pdf
x = np.unique(np.hstack([x0, x1]))
y0 = np.interp(x, x0, pdf0)
y1 = np.interp(x, x1, pdf1)
name = '({:s}) * ({:s})'.format(self.name, other.name)
return Distribution(x, y0 * y1, name=name)
elif hasattr(other, '__call__'):
x0 = self._x
y0 = self._pdf
y1 = other(x0)
n1 = getattr(other, '__name__', 'f(...)')
if n1 == '<lambda>':
t = inspect.getsource(other).replace(' ', '')[:-1]
t = ''.join(t.split('lambda')[1:]).split(':')
n1 = 'f({t[0]}) = {t[1]}'.format(t=t)
name = '({:s}) * ({:s})'.format(self.name, n1)
return Distribution(x0, y0 * y1, name=name)
def __div__(self, other):
""" Divide distribution """
if np.isscalar(other):
name = '{:s} / {}'.format(self.name, other)
return Distribution(self._x, self._pdf / other, name=name)
elif isinstance(other, Distribution):
x0 = self._x
pdf0 = self._pdf
x1 = other._x
pdf1 = other._pdf
x = np.unique(np.hstack([x0, x1]))
y0 = np.interp(x, x0, pdf0)
y1 = np.interp(x, x1, pdf1)
name = '({:s}) / ({:s})'.format(self.name, other.name)
return Distribution(x, y0 / y1, name=name)
elif hasattr(other, '__call__'):
x0 = self._x
y0 = self._pdf
y1 = other(x0)
n1 = getattr(other, '__name__', 'f(...)')
if n1 == '<lambda>':
t = inspect.getsource(other).replace(' ', '')[:-1]
t = ''.join(t.split('lambda')[1:]).split(':')
n1 = 'f({t[0]}) = {t[1]}'.format(t=t)
name = '({:s}) / ({:s})'.format(self.name, n1)
return Distribution(x0, y0 / y1, name=name)
def __repr__(self):
return '{}\n{:s}'.format(object.__repr__(self), self.name)
def __str__(self):
return '{:s}'.format(self.name)
def __call__(self, x):
return self.pdf(x)
def main():
""" Test case: combining 4 experimental measurements
Let's have 4 independent measurements of the same quantity with Gaussian uncertainties.
The measurements are samples of a given Gaussian distribution of which
we want to estimate the mean and dispersion values
A quick Bayesian inference (with uniform priors) will show that if all
measurements are from the same distribution, the production of the 4
posterior distributions will give you the underlying Gaussian
parameters.
if mu = {mk}, and sig = {sk} for k=1, N:
p(m, s | mu, sig) ~ prod_k p(mk, sk | m, s) p(m, s)
We also find that the product of Gaussians is a Gaussian
"""
import pylab as plt
# define a (normalized) gaussian probability distribution function
Normal = lambda x, m, s: 1. / np.sqrt(2. * np.pi * s ** 2) * np.exp(-0.5 * ((x - m) / s) ** 2 )
x = np.arange(0, 6, 0.01)
mu = np.array([ 3.3, 2.65, 2.4, 3.14])
sig = np.array([ 0.38, 0.17, 0.3, 0.34])
yk = [Distribution(x, Normal(x, mk, sk), name='N({:0.3f},{:0.3f})'.format(mk, sk)) for (mk, sk) in zip(mu, sig)]
B = yk[0]
for k in yk[1:]:
B *= k
print('{:6s} {:6s} {:6s}'.format(*'norm mean std'.split()))
for k in yk:
print('{y.norm:5.3g} {y.mean:5.3g} {y.std:5.3g}'.format(y=k))
plt.plot(x, k._pdf)
plt.plot(x, B._pdf, lw=2, color='0.0')
print("final distribution:")
print("Expr: {B.name}\n stats: \n mean = {B.mean},\n std = {B.std},\n skew = {B.skew},\n kurtosis = {B.kurtosis}".format(B=B))
if __name__ == '__main__':
main()
|
mfouesneau/faststats
|
faststats/distrib.py
|
Python
|
mit
| 11,868
|
[
"Gaussian"
] |
6444b9c3303f10b23f2caa297b41b30627449c59f173ca61fb8691af3c3573f3
|
# Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.model import ItemList
from robot.utils import setter
from .message import Message
class ExecutionErrors(object):
message_class = Message
def __init__(self, messages=None):
self.messages = messages
@setter
def messages(self, msgs):
return ItemList(self.message_class, items=msgs)
def add(self, other):
self.messages.extend(other.messages)
def visit(self, visitor):
visitor.visit_errors(self)
def __iter__(self):
return iter(self.messages)
def __len__(self):
return len(self.messages)
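# A minimal usage sketch (illustrative; the Message signature is assumed
# from robot.model, not defined in this file):
#
# errors = ExecutionErrors([Message('Import of library failed', level='ERROR')])
# assert len(errors) == 1
# for msg in errors:
#     print(msg)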
|
Senseg/robotframework
|
src/robot/result/executionerrors.py
|
Python
|
apache-2.0
| 1,189
|
[
"VisIt"
] |
27fff73773db7f0350c191cd58b91403184702e58d8d1892e68e9b6a78512979
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import glance_store
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import six
import webob
from glance.api import policy
from glance.common import exception
from glance.common import timeutils
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
class ImageMembersController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None,
store_api=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.store_api = store_api or glance_store
self.gateway = glance.gateway.Gateway(self.db_api, self.store_api,
self.notifier, self.policy)
def _get_member_repo(self, req, image):
try:
# For public images, a forbidden exception with message
# "Public images do not have members" is thrown.
return self.gateway.get_member_repo(image, req.context)
except exception.Forbidden as e:
msg = (_("Error fetching members of image %(image_id)s: "
"%(inner_msg)s") % {"image_id": image.image_id,
"inner_msg": e.msg})
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
def _lookup_image(self, req, image_id):
image_repo = self.gateway.get_repo(req.context)
try:
return image_repo.get(image_id)
except (exception.NotFound):
msg = _("Image %s not found.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = _("You are not authorized to lookup image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
def _lookup_member(self, req, image, member_id):
member_repo = self._get_member_repo(req, image)
try:
return member_repo.get(member_id)
except (exception.NotFound):
msg = (_("%(m_id)s not found in the member list of the image "
"%(i_id)s.") % {"m_id": member_id,
"i_id": image.image_id})
LOG.warning(msg)
raise webob.exc.HTTPNotFound(explanation=msg)
except exception.Forbidden:
msg = (_("You are not authorized to lookup the members of the "
"image %s.") % image.image_id)
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
@utils.mutating
def create(self, req, image_id, member_id):
"""
Adds a membership to the image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image = self._lookup_image(req, image_id)
member_repo = self._get_member_repo(req, image)
image_member_factory = self.gateway.get_image_member_factory(
req.context)
try:
new_member = image_member_factory.new_image_member(image,
member_id)
member_repo.add(new_member)
return new_member
except exception.Forbidden:
msg = _("Not allowed to create members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
except exception.Duplicate:
msg = _("Member %(member_id)s is duplicated for image "
"%(image_id)s") % {"member_id": member_id,
"image_id": image_id}
LOG.warning(msg)
raise webob.exc.HTTPConflict(explanation=msg)
except exception.ImageMemberLimitExceeded as e:
msg = (_("Image member limit exceeded for image %(id)s: %(e)s:")
% {"id": image_id,
"e": encodeutils.exception_to_unicode(e)})
LOG.warning(msg)
raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
@utils.mutating
def update(self, req, image_id, member_id, status):
"""
Updates the status of a membership of the image.
:param req: the Request object coming from the wsgi layer
:param image_id: the image identifier
:param member_id: the member identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
image = self._lookup_image(req, image_id)
member_repo = self._get_member_repo(req, image)
member = self._lookup_member(req, image, member_id)
try:
member.status = status
member_repo.save(member)
return member
except exception.Forbidden:
msg = _("Not allowed to update members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
except ValueError as e:
msg = (_("Incorrect request: %s")
% encodeutils.exception_to_unicode(e))
LOG.warning(msg)
raise webob.exc.HTTPBadRequest(explanation=msg)
def index(self, req, image_id):
"""
Return a list of dictionaries indicating the members of the
image, i.e., those tenants the image is shared with.
:param req: the Request object coming from the wsgi layer
:param image_id: The image identifier
:retval The response body is a mapping of the following form::
{'members': [
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}, ..
]}
"""
image = self._lookup_image(req, image_id)
member_repo = self._get_member_repo(req, image)
members = []
try:
for member in member_repo.list():
members.append(member)
except exception.Forbidden:
msg = _("Not allowed to list members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
return dict(members=members)
def show(self, req, image_id, member_id):
"""
Returns the membership of the tenant with respect to the specified image_id.
:param req: the Request object coming from the wsgi layer
:param image_id: The image identifier
:retval The response body is a mapping of the following form::
{'member_id': <MEMBER>,
'image_id': <IMAGE>,
'status': <MEMBER_STATUS>
'created_at': ..,
'updated_at': ..}
"""
try:
image = self._lookup_image(req, image_id)
return self._lookup_member(req, image, member_id)
except webob.exc.HTTPForbidden as e:
# Convert Forbidden to NotFound to prevent information
# leakage.
raise webob.exc.HTTPNotFound(explanation=e.explanation)
@utils.mutating
def delete(self, req, image_id, member_id):
"""
Removes a membership from the image.
"""
image = self._lookup_image(req, image_id)
member_repo = self._get_member_repo(req, image)
member = self._lookup_member(req, image, member_id)
try:
member_repo.remove(member)
return webob.Response(body='', status=204)
except exception.Forbidden:
msg = _("Not allowed to delete members for image %s.") % image_id
LOG.warning(msg)
raise webob.exc.HTTPForbidden(explanation=msg)
class RequestDeserializer(wsgi.JSONRequestDeserializer):
def __init__(self):
super(RequestDeserializer, self).__init__()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
def create(self, request):
body = self._get_request_body(request)
try:
member_id = body['member']
if not member_id:
raise ValueError()
except KeyError:
msg = _("Member to be added not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
except ValueError:
msg = _("Member can't be empty")
raise webob.exc.HTTPBadRequest(explanation=msg)
return dict(member_id=member_id)
def update(self, request):
body = self._get_request_body(request)
try:
status = body['status']
except KeyError:
msg = _("Status not specified")
raise webob.exc.HTTPBadRequest(explanation=msg)
return dict(status=status)
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema or get_schema()
def _format_image_member(self, member):
member_view = {}
attributes = ['member_id', 'image_id', 'status']
for key in attributes:
member_view[key] = getattr(member, key)
member_view['created_at'] = timeutils.isotime(member.created_at)
member_view['updated_at'] = timeutils.isotime(member.updated_at)
member_view['schema'] = '/v2/schemas/member'
member_view = self.schema.filter(member_view)
return member_view
def create(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = jsonutils.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def update(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = jsonutils.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def index(self, response, image_members):
image_members = image_members['members']
image_members_view = []
for image_member in image_members:
image_member_view = self._format_image_member(image_member)
image_members_view.append(image_member_view)
totalview = dict(members=image_members_view)
totalview['schema'] = '/v2/schemas/members'
body = jsonutils.dumps(totalview, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def show(self, response, image_member):
image_member_view = self._format_image_member(image_member)
body = jsonutils.dumps(image_member_view, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
_MEMBER_SCHEMA = {
'member_id': {
'type': 'string',
'description': _('An identifier for the image member (tenantId)')
},
'image_id': {
'type': 'string',
'description': _('An identifier for the image'),
'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
},
'created_at': {
'type': 'string',
'description': _('Date and time of image member creation'),
# TODO(brian-rosmaita): our jsonschema library doesn't seem to like the
# format attribute, figure out why (and also fix in images.py)
# 'format': 'date-time',
},
'updated_at': {
'type': 'string',
'description': _('Date and time of last modification of image member'),
# 'format': 'date-time',
},
'status': {
'type': 'string',
'description': _('The status of this image member'),
'enum': [
'pending',
'accepted',
'rejected'
]
},
'schema': {
'readOnly': True,
'type': 'string'
}
}
def get_schema():
properties = copy.deepcopy(_MEMBER_SCHEMA)
schema = glance.schema.Schema('member', properties)
return schema
def get_collection_schema():
member_schema = get_schema()
return glance.schema.CollectionSchema('members', member_schema)
def create_resource():
"""Image Members resource factory method"""
deserializer = RequestDeserializer()
serializer = ResponseSerializer()
controller = ImageMembersController()
return wsgi.Resource(controller, deserializer, serializer)
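# A hedged usage sketch (illustrative only, not part of the original
# module): the deserializer above expects a create request body of the
# form {"member": "<tenant id>"}, and the serializer returns a mapping
# such as:
# {"member_id": "<tenant id>", "image_id": "<image uuid>",
#  "status": "pending", "created_at": "...", "updated_at": "...",
#  "schema": "/v2/schemas/member"}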
|
dims/glance
|
glance/api/v2/image_members.py
|
Python
|
apache-2.0
| 14,091
|
[
"Brian"
] |
37f66c4f73ae38c0dab42a3a69df6e9fc1eaafcdd57d1a869a9b664e132ed8a1
|
import numpy as np
from ..fileio import FileIO as psopen
from ...common import pandas
#import pysal_examples
from ... import examples as pysal_examples
import unittest as ut
PANDAS_EXTINCT = pandas is None
class Test_Table(ut.TestCase):
def setUp(self):
self.filehandler = psopen(pysal_examples.get_path('columbus.dbf'))
self.df = self.filehandler.to_df()
self.filehandler.seek(0)
self.shapefile = psopen(pysal_examples.get_path('columbus.shp'))
self.csvhandler = psopen(pysal_examples.get_path('usjoin.csv'))
self.csv_df = self.csvhandler.to_df()
self.csvhandler.seek(0)
@ut.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_to_df(self):
for column in self.csv_df.columns:
if column.lower() == 'name':
continue
np.testing.assert_allclose(self.csvhandler.by_col(column),
self.csv_df[column].values)
for column in self.df.columns:
if column == 'geometry':
continue
np.testing.assert_allclose(self.filehandler.by_col(column),
self.df[column])
|
lixun910/pysal
|
pysal/lib/io/tests/test_Tables.py
|
Python
|
bsd-3-clause
| 1,188
|
[
"COLUMBUS"
] |
2698fa1f9fd669e80fd2d5a816bf0008442c144cffc00f713d1059af3af5d537
|
# Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os
from xml.dom import minidom
from zeroinstall.injector import namespaces
from __main__ import die
mydir = os.path.dirname(__file__)
def get_choice(msg, options):
print()
print(msg)
print()
for i, label in options:
print("{i}) {label}".format(i = i, label = label))
while True:
try:
n = int(input("\n> "))
except ValueError:
print("Not an integer")
continue
for i, label in options:
if i == n:
return n
print("Invalid choice")
def create(options):
template = options.template
if template.endswith('.xml.template'):
remote = True
elif template.endswith('.xml'):
remote = False
else:
die("'{template}' does not end with .xml.template or .xml".format(template = template))
print("'{template}' does not exist; creating new template.".format(template = template))
if not remote:
print("\nAs it ends with .xml, not .xml.template, I assume you want a feed for\n"
"a local project (e.g. a Git checkout). If you want a template for\n"
"publishing existing releases, use {template}.template instead.".format(
template = template))
if options.from_feed is None:
doc = minidom.parse(os.path.join(mydir, "example.xml"))
impls = doc.getElementsByTagNameNS(namespaces.XMLNS_IFACE, 'implementation')
impls[0].parentNode.removeChild(impls[0] if remote else impls[1])
choice = get_choice("Does your program need to be compiled before it can be used?", [
(1, "Generate a source template (e.g. for compiling C source code)"),
(2, "Generate a binary template (e.g. for a pre-compiled binary or script)"),
])
commands = doc.getElementsByTagNameNS(namespaces.XMLNS_IFACE, 'command')
commands[0].parentNode.removeChild(commands[choice - 1])
impl, = doc.getElementsByTagNameNS(namespaces.XMLNS_IFACE, 'implementation')
if choice == 1:
impl.setAttribute('arch', '*-src')
else:
impl.setAttribute('arch', '*-*')
else:
if remote:
import infer
doc = infer.from_feed(options.from_feed)
else:
die("--from-feed can only be used to create new templates, not local feeds".format(template=template))
assert not os.path.exists(template), template
print("\nWriting", template)
with open(template, 'wt') as stream:
stream.write('<?xml version="1.0"?>\n')
doc.documentElement.writexml(stream)
stream.write('\n')
|
0install/0template
|
create.py
|
Python
|
lgpl-2.1
| 2,453
|
[
"VisIt"
] |
02b30b8c5ed374d6d2c725e649515a9547cdb7ddd7a5d0448476fed233405988
|
#
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from unittest import TestCase
import zmq
from zmq.tests import BaseZMQTestCase
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class TestPubSub(BaseZMQTestCase):
pass
# We are disabling this test while an issue is being resolved.
# def test_basic(self):
# s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
# s2.setsockopt(zmq.SUBSCRIBE,'')
# import time; time.sleep(0.5)
# msg1 = 'message'
# s1.send(msg1)
# msg2 = s2.recv() # This is blocking!
# self.assertEquals(msg1, msg2)
def test_topic(self):
s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
s2.setsockopt(zmq.SUBSCRIBE, 'x')
import time; time.sleep(0.1)
msg1 = 'message'
s1.send(msg1)
self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK)
msg1 = 'xmessage'
s1.send(msg1)
msg2 = s2.recv()
self.assertEquals(msg1, msg2)
|
takluyver/pyzmq
|
zmq/tests/test_pubsub.py
|
Python
|
lgpl-3.0
| 1,980
|
[
"Brian"
] |
cf07775f3455792897568c004622c20cc5b9c55139f54962965cef2ce19abdb4
|
#
# Copyright 2015, 2017, 2020-2021 Lars Pastewka (U. Freiburg)
# 2014-2015, 2017-2018 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
from ase.atoms import Atoms
from ase.calculators.calculator import Calculator
from ase.constraints import FixAtoms
from ase.geometry import cellpar_to_cell
from matscipy.neighbours import neighbour_list
from matscipy.fracture_mechanics.crack import (ConstantStrainRate,
get_strain)
def triangular_lattice_slab(a, n, m):
# primitive unit cell
## a = Atoms('H', [(0, 0, 0)],
## cell=cellpar_to_cell([a, a, 10*a, 90, 90, 120]),
## pbc=[True, True, False])
# cubic unit cell
c = 10*a
a = Atoms('H2',[(0, 0, c/2),
(0.5*a, np.sqrt(3)*a/2, c/2)],
cell=[[a, 0, 0],
[0, np.sqrt(3)*a, 0],
[0, 0, c]],
pbc=[True, True, True])
# we use unit masses
a.set_masses([1]*len(a))
return a * (n, m, 1)
def find_triangles_2d(atoms, cutoff, minangle=30*np.pi/180, maxangle=120*np.pi/180,
xdim=0, ydim=1):
"""
Return a list of all triangles of a triangular lattice sitting in the x-y
plane.
"""
# Contains atom indices that border the triangle
corner1 = []
corner2 = []
corner3 = []
# Find triangles
i, j, D = neighbour_list('ijD', atoms, cutoff)
coord = np.bincount(i)
for k in range(len(atoms)):
firstn = np.searchsorted(i, k, side='left')
lastn = np.searchsorted(i, k, side='right')
# Sort six neighbors by angle
angles = np.arctan2(D[firstn:lastn, xdim], D[firstn:lastn, ydim])
s = np.argsort(angles)
# Only pick triangles with angles between min and max angle
triangles = (angles[np.roll(s, -1)]-angles[s]) % (2*np.pi)
m = (triangles > minangle) & (triangles < maxangle)
# Add corners of triangle to lists
corner1 += list(np.array([k]*(lastn-firstn))[m])
corner2 += list(j[firstn:lastn][s][m])
corner3 += list(j[firstn:lastn][np.roll(s, -1)][m])
# Sort corners
corner1, corner2, corner3 = np.sort([corner1, corner2, corner3], axis=0)
# Remove duplicate triangles
uniqueid = corner3+len(atoms)*(corner2+len(atoms)*corner1)
_, s = np.unique(uniqueid, return_index=True)
return corner1[s], corner2[s], corner3[s]
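# A minimal sketch of how the two helpers above combine (assumes ASE is
# installed; the lattice constant and cutoff are illustrative):
# slab = triangular_lattice_slab(1.0, 4, 4)
# c1, c2, c3 = find_triangles_2d(slab, cutoff=1.1)
# print('%d unique triangles found' % len(c1))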
class IdealBrittleSolid(Calculator):
"""
Implementation of force field for an ideal brittle solid
Described in Marder, Int. J. Fract. 130, 517-555 (2004)
"""
implemented_properties = ['energy', 'free_energy', 'energies', 'stress',
'forces']
default_parameters = {'a': 1.0, # lattice constant
'rc': 1.01, # cutoff
'k': 1.0, # spring constant
'beta': 0.01, # Kelvin dissipation
'b': 0.01, # Stokes dissipation
'linear': False # Linearized response
}
def __init__(self, *args, **kwargs):
Calculator.__init__(self, *args, **kwargs)
self.crystal_bonds = 0
def set_reference_crystal(self, crystal):
rc = self.parameters['rc']
self.crystal = crystal.copy()
i = neighbour_list('i', self.crystal, rc)
self.crystal_bonds = len(i)
def calculate(self, atoms, properties, system_changes):
Calculator.calculate(self, atoms, properties, system_changes)
a = self.parameters['a']
rc = self.parameters['rc']
k = self.parameters['k']
beta = self.parameters['beta']
linear = self.parameters['linear']
energies = np.zeros(len(atoms))
forces = np.zeros((len(atoms), 3))
velocities = (atoms.get_momenta().T/atoms.get_masses()).T
if not linear:
i, j, dr, r = neighbour_list('ijDd', atoms, rc)
dr_hat = (dr.T/r).T
dv = velocities[j] - velocities[i]
de = 0.5*k*(r - a)**2 # spring energies
e = 0.5*de # half goes to each end of spring
f = (k*(r - a)*dr_hat.T).T + beta*dv
else:
# Linearized response
i, j, D, S = neighbour_list('ijDS', atoms, rc)
# Displacements
u = atoms.positions - self.crystal.positions
# Bond vector taken from reference configuration
#dr = self.crystal.positions[j] - self.crystal.positions[i] + \
# S.dot(self.crystal.cell)
dr = self.crystal.positions[j] - self.crystal.positions[i] + S.dot(self.crystal.cell)
r = np.sqrt((dr*dr).sum(axis=-1))
dr_hat = (dr.T/r).T
dv = velocities[j] - velocities[i]
de = 0.5*k*(((u[j] - u[i])*dr_hat).sum(axis=-1))**2 # spring energies
e = 0.5*de # half goes to each end of spring
f = ((k*(u[j] - u[i])*dr_hat).sum(axis=-1)*dr_hat.T).T + beta*dv
energies[:] = np.bincount(i, e, minlength=len(atoms))
for kk in range(3):
forces[:, kk] = np.bincount(i, weights=f[:, kk],
minlength=len(atoms))
energy = energies.sum()
# add energy 0.5*k*(rc - a)**2 for each broken bond
if not linear and len(i) < self.crystal_bonds:
de = 0.5*k*(rc - a)**2
energy += 0.5*de*(self.crystal_bonds - len(i))
# Stokes dissipation
if 'stokes' in atoms.arrays:
b = atoms.get_array('stokes')
forces -= (velocities.T*b).T
self.results = {'energy': energy,
'free_energy': energy,
'energies': energies,
'forces': forces}
# Virial
if not linear:
virial = np.zeros(6)
if len(i) > 0:
virial = 0.5*np.array([dr[:,0]*f[:,0], # xx
dr[:,1]*f[:,1], # yy
dr[:,2]*f[:,2], # zz
dr[:,1]*f[:,2], # yz
dr[:,0]*f[:,2], # xz
dr[:,0]*f[:,1]]).sum(axis=1) # xy
self.results['stress'] = virial/atoms.get_volume()
def get_wave_speeds(self, atoms):
"""
Return longitudinal, shear and Rayleigh wave speeds
"""
k = self.parameters['k']
a = self.parameters['a']
m = atoms.get_masses()[0]
ka2_over_m = np.sqrt(k*a**2/m)
c_l = np.sqrt(9./8.*ka2_over_m)
c_s = np.sqrt(3./8.*ka2_over_m)
c_R = 0.563*ka2_over_m
return c_l, c_s, c_R
def get_elastic_moduli(self):
"""
Return Lam\'e constants lambda and mu
"""
k = self.parameters['k']
a = self.parameters['a']
lam = np.sqrt(3.0)/2.0*k/a
mu = lam
return lam, mu
def get_youngs_modulus(self):
k = self.parameters['k']
a = self.parameters['a']
return 5.0*np.sqrt(3.0)/4.0*k/a
def get_poisson_ratio(self):
return 0.25
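# Hedged usage sketch for the calculator above (assumes ASE is
# installed; parameter values are illustrative):
# atoms = triangular_lattice_slab(1.0, 8, 8)
# calc = IdealBrittleSolid(a=1.0, rc=1.01, k=1.0)
# calc.set_reference_crystal(atoms)
# atoms.set_calculator(calc)
# print(atoms.get_potential_energy())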
def find_crack_tip(atoms, dt=None, store=True, results=None):
"""
Return atom at the crack tip and its x-coordinate
Crack tip is defined to be location of rightmost atom
whose nearest neighbour is at distance > 2.5*a
"""
calc = atoms.get_calculator()
a = calc.parameters['a']
rc = calc.parameters['rc']
i = neighbour_list('i', atoms, rc)
nn = np.bincount(i) # number of nearest neighbours, equal to 6 in bulk
x = atoms.positions[:, 0]
y = atoms.positions[:, 1]
bottom = y.min()
left = x.min()
width = x.max() - x.min()
height = y.max() - y.min()
old_tip_x = atoms.info.get('tip_x', left + 0.3*width)
# crack cannot have advanced more than c_R*dt
if dt is not None:
cl, ct, cR = calc.get_wave_speeds(atoms)
tip_max_x = old_tip_x + 10.0*cR*dt # FIXME definition of cR seems wrong, shouldn't need factor of 10 here...
else:
tip_max_x = left + 0.8*width
broken = ((nn != 6) &
(x > left + 0.2*width) & (x < tip_max_x) &
(y > bottom + 0.1*height) & (y < bottom + 0.9*height))
index = atoms.positions[broken, 0].argmax()
tip_atom = broken.nonzero()[0][index]
tip_x = atoms.positions[tip_atom, 0]
strain = get_strain(atoms)
eps_G = atoms.info['eps_G']
print('tip_x: %.3f strain: %.4f delta: %.3f' % (tip_x, strain, strain/eps_G))
if store:
atoms.info['tip_atom'] = tip_atom
atoms.info['tip_x'] = tip_x
if results is not None:
results.append(tip_x)
return (tip_atom, tip_x, broken)
def set_initial_velocities(c):
"""
Initialise a dynamical state by kicking some atoms behind tip
"""
tip_atom, tip_x, broken = find_crack_tip(c, store=False)
init_atoms = broken.nonzero()[0][c.positions[broken, 0].argsort()[-8:]]
upper = list(init_atoms[c.positions[init_atoms, 1] > 0])
lower = list(init_atoms[c.positions[init_atoms, 1] < 0])
calc = c.get_calculator()
cl, ct, cR = calc.get_wave_speeds(c)
v0 = cl/10.
v = np.zeros((len(c), 3))
v[upper, 1] = +v0
v[lower, 1] = -v0
c.set_velocities(v)
print('Setting velocities of upper=%s, lower=%s to +/- %.2f' % (upper, lower, v0))
return (upper, lower, v0)
def set_constraints(c, a):
# fix atoms in the top and bottom rows
top = c.positions[:, 1].max()
bottom = c.positions[:, 1].min()
left = c.positions[:, 0].min()
right = c.positions[:, 0].max()
fixed_mask = ((abs(c.positions[:, 1] - top) < 0.5*a) |
(abs(c.positions[:, 1] - bottom) < 0.5*a))
fix_atoms = FixAtoms(mask=fixed_mask)
if 'fix' in c.arrays:
c.set_array('fix', fixed_mask)
else:
c.new_array('fix', fixed_mask)
print('Fixed %d atoms' % fixed_mask.sum())
c.set_constraint(fix_atoms)
# Stokes damping regions at left and right of slab
stokes = np.zeros(len(c))
x = c.positions[:, 0]
stokes[:] = 0.0
stokes[x < left + 5.0*a] = (1.0 - (x-left)/(5.0*a))[x < left + 5.0*a]
stokes[x > right - 10.0*a] = (1.0 - (right-x)/(10.0*a))[x > right - 10.0*a]
if 'stokes' in c.arrays:
c.set_array('stokes', stokes)
else:
c.new_array('stokes', stokes)
print('Applying Stokes damping to %d atoms' % (stokes != 0.0).sum())
def extend_strip(atoms, a, N, M, vacuum):
x = atoms.positions[:, 0]
left = x.min()
width = x.max() - x.min()
tip_x = atoms.info['tip_x']
if tip_x < left + 0.6*width:
# only need to extend strip when crack gets near end
return False
print('tip_x (%.2f) > left + 0.6*width (%.2f)' % (tip_x, left + 0.6*width))
# extra material for pasting onto end
a = atoms.get_calculator().parameters['a']
extra = triangular_lattice_slab(a, M, N)
# apply uniform strain and append to slab
strain = get_strain(atoms)
extra.center(vacuum, axis=1)
fix = atoms.get_array('fix')
extra.positions[:, 0] += atoms.positions[fix, 0].max() + a/2.0
extra.positions[:, 1] -= extra.positions[:, 1].mean()
extra.positions[:, 1] *= (1.0 + strain)
print('Adding %d atoms' % len(extra))
atoms += extra
atoms.set_constraint([])
discard = atoms.positions[:, 0].argsort()[:len(extra)]
print('Discarding %d atoms' % len(discard))
del atoms[discard]
return True
|
libAtoms/matscipy
|
matscipy/fracture_mechanics/idealbrittlesolid.py
|
Python
|
lgpl-2.1
| 12,355
|
[
"ASE",
"CRYSTAL",
"Matscipy"
] |
d3808d97d12a4f0e349c22d1026dd27f140ba0dfc31bb4cc8f17abc6f9edabd4
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Low-level graphics rendering.
This module provides an efficient low-level abstraction over OpenGL. It gives
very good performance for rendering OpenGL primitives; far better than the
typical immediate-mode usage and, on modern graphics cards, better than using
display lists in many cases. The module is used internally by other areas of
pyglet.
See the Programming Guide for details on how to use this graphics API.
Batches and groups
==================
Without even needing to understand the details on how to draw primitives with
the graphics API, developers can make use of `Batch` and `Group`
objects to improve performance of sprite and text rendering.
The `Sprite`, `Label` and `TextLayout` classes all accept a ``batch`` and
``group`` parameter in their constructors. A batch manages a set of objects
that will be drawn all at once, and a group describes the manner in which an
object is drawn.
The following example creates a batch, adds two sprites to the batch, and then
draws the entire batch::
batch = pyglet.graphics.Batch()
car = pyglet.sprite.Sprite(car_image, batch=batch)
boat = pyglet.sprite.Sprite(boat_image, batch=batch)
def on_draw():
batch.draw()
Drawing a complete batch is much faster than drawing the items in the batch
individually, especially when those items belong to a common group.
Groups describe the OpenGL state required for an item. This is for the most
part managed by the sprite and text classes, however you can also use groups
to ensure items are drawn in a particular order. For example, the following
example adds a background sprite which is guaranteed to be drawn before the
car and the boat::
batch = pyglet.graphics.Batch()
background = pyglet.graphics.OrderedGroup(0)
foreground = pyglet.graphics.OrderedGroup(1)
background = pyglet.sprite.Sprite(background_image,
batch=batch, group=background)
car = pyglet.sprite.Sprite(car_image, batch=batch, group=foreground)
boat = pyglet.sprite.Sprite(boat_image, batch=batch, group=foreground)
def on_draw():
batch.draw()
It's preferable to manage sprites and text objects within as few batches as
possible. If the drawing of sprites or text objects needs to be interleaved
with other drawing that does not use the graphics API, multiple batches will
be required.
Data item parameters
====================
Many of the functions and methods in this module accept any number of ``data``
parameters as their final parameters. In the documentation these are notated
as ``*data`` in the formal parameter list.
A data parameter describes a vertex attribute format and an optional sequence
to initialise that attribute. Examples of common attribute formats are:
``"v3f"``
Vertex position, specified as three floats.
``"c4B"``
Vertex color, specified as four unsigned bytes.
``"t2f"``
Texture coordinate, specified as two floats.
See `pyglet.graphics.vertexattribute` for the complete syntax of the vertex
format string.
When no initial data is to be given, the data item is just the format string.
For example, the following creates a 2 element vertex list with position and
color attributes::
vertex_list = pyglet.graphics.vertex_list(2, 'v2f', 'c4B')
When initial data is required, wrap the format string and the initial data in
a tuple, for example::
vertex_list = pyglet.graphics.vertex_list(2,
('v2f', (0.0, 1.0, 1.0, 0.0)),
('c4B', (255, 255, 255, 255) * 2))
Drawing modes
=============
Methods in this module that accept a ``mode`` parameter will accept any value
in the OpenGL drawing mode enumeration; for example, ``GL_POINTS``,
``GL_LINES``, ``GL_TRIANGLES``, etc.
Because of the way the graphics API renders multiple primitives with shared
state, ``GL_POLYGON``, ``GL_LINE_LOOP`` and ``GL_TRIANGLE_FAN`` cannot be used
--- the results are undefined.
When using ``GL_LINE_STRIP``, ``GL_TRIANGLE_STRIP`` or ``GL_QUAD_STRIP`` care
must be taken to insert degenerate vertices at the beginning and end of each
vertex list. For example, given the vertex list::
A, B, C, D
the correct vertex list to provide is::
A, A, B, C, D, D
Alternatively, the ``NV_primitive_restart`` extension can be used if it is
present. This also permits use of ``GL_POLYGON``, ``GL_LINE_LOOP`` and
``GL_TRIANGLE_FAN``. Unfortunately the extension is not provided by older
video drivers, and requires indexed vertex lists.
:since: pyglet 1.1
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import pyglet
from pyglet.gl import *
from pyglet import gl
from pyglet.graphics import vertexbuffer, vertexattribute, vertexdomain
_debug_graphics_batch = pyglet.options['debug_graphics_batch']
def draw(size, mode, *data):
'''Draw a primitive immediately.
:Parameters:
`size` : int
Number of vertices given
`mode` : int
OpenGL drawing mode, e.g. ``GL_TRIANGLES``
`data` : data items
Attribute formats and data. See the module summary for
details.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
buffers = []
for format, array in data:
attribute = vertexattribute.create_attribute(format)
assert size == len(array) // attribute.count, \
'Data for %s is incorrect length' % format
buffer = vertexbuffer.create_mappable_buffer(
size * attribute.stride, vbo=False)
attribute.set_region(buffer, 0, size, array)
attribute.enable()
attribute.set_pointer(buffer.ptr)
buffers.append(buffer)
glDrawArrays(mode, 0, size)
glFlush()
glPopClientAttrib()
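# Illustrative call (requires a current GL context): draw two points at
# the given 2D integer coordinates using the function above.
# draw(2, GL_POINTS, ('v2i', (10, 15, 30, 35)))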
def draw_indexed(size, mode, indices, *data):
'''Draw a primitive with indexed vertices immediately.
:Parameters:
`size` : int
Number of vertices given
`mode` : int
OpenGL drawing mode, e.g. ``GL_TRIANGLES``
`indices` : sequence of int
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and data. See the module summary for details.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
buffers = []
for format, array in data:
attribute = vertexattribute.create_attribute(format)
assert size == len(array) // attribute.count, \
'Data for %s is incorrect length' % format
buffer = vertexbuffer.create_mappable_buffer(
size * attribute.stride, vbo=False)
attribute.set_region(buffer, 0, size, array)
attribute.enable()
attribute.set_pointer(buffer.ptr)
buffers.append(buffer)
if size <= 0xff:
index_type = GL_UNSIGNED_BYTE
index_c_type = ctypes.c_ubyte
elif size <= 0xffff:
index_type = GL_UNSIGNED_SHORT
index_c_type = ctypes.c_ushort
else:
index_type = GL_UNSIGNED_INT
index_c_type = ctypes.c_uint
index_array = (index_c_type * len(indices))(*indices)
glDrawElements(mode, len(indices), index_type, index_array)
glFlush()
glPopClientAttrib()
def _parse_data(data):
'''Given a list of data items, returns (formats, initial_arrays).'''
assert data, 'No attribute formats given'
# Return tuple (formats, initial_arrays).
formats = []
initial_arrays = []
for i, format in enumerate(data):
if isinstance(format, tuple):
format, array = format
initial_arrays.append((i, array))
formats.append(format)
formats = tuple(formats)
return formats, initial_arrays
def _get_default_batch():
shared_object_space = gl.current_context.object_space
try:
return shared_object_space.pyglet_graphics_default_batch
except AttributeError:
shared_object_space.pyglet_graphics_default_batch = Batch()
return shared_object_space.pyglet_graphics_default_batch
def vertex_list(count, *data):
'''Create a `VertexList` not associated with a batch, group or mode.
:Parameters:
`count` : int
The number of vertices in the list.
`data` : data items
Attribute formats and initial data for the vertex list. See the
module summary for details.
:rtype: `VertexList`
'''
# Note that mode=0 because the default batch is never drawn: vertex lists
# returned from this function are drawn directly by the app.
return _get_default_batch().add(count, 0, None, *data)
def vertex_list_indexed(count, indices, *data):
'''Create an `IndexedVertexList` not associated with a batch, group or mode.
:Parameters:
`count` : int
The number of vertices in the list.
`indices` : sequence
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and initial data for the vertex list. See the
module summary for details.
:rtype: `IndexedVertexList`
'''
# Note that mode=0 because the default batch is never drawn: vertex lists
# returned from this function are drawn directly by the app.
return _get_default_batch().add_indexed(count, 0, None, indices, *data)
class Batch(object):
'''Manage a collection of vertex lists for batched rendering.
Vertex lists are added to a `Batch` using the `add` and `add_indexed`
methods. An optional group can be specified along with the vertex list,
which gives the OpenGL state required for its rendering. Vertex lists
with shared mode and group are allocated into adjacent areas of memory and
sent to the graphics card in a single operation.
Call `VertexList.delete` to remove a vertex list from the batch.
'''
def __init__(self):
'''Create a graphics batch.'''
# Mapping to find domain.
# group -> (attributes, mode, indexed) -> domain
self.group_map = {}
# Mapping of group to list of children.
self.group_children = {}
# List of top-level groups
self.top_groups = []
self._draw_list = []
self._draw_list_dirty = False
def add(self, count, mode, group, *data):
'''Add a vertex list to the batch.
:Parameters:
`count` : int
The number of vertices in the list.
`mode` : int
OpenGL drawing mode enumeration; for example, one of
``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
See the module summary for additional information.
`group` : `Group`
Group of the vertex list, or ``None`` if no group is required.
`data` : data items
Attribute formats and initial data for the vertex list. See
the module summary for details.
:rtype: `VertexList`
'''
formats, initial_arrays = _parse_data(data)
domain = self._get_domain(False, mode, group, formats)
domain.__formats = formats
# Create vertex list and initialize
vlist = domain.create(count)
for i, array in initial_arrays:
vlist._set_attribute_data(i, array)
return vlist
def add_indexed(self, count, mode, group, indices, *data):
'''Add an indexed vertex list to the batch.
:Parameters:
`count` : int
The number of vertices in the list.
`mode` : int
OpenGL drawing mode enumeration; for example, one of
``GL_POINTS``, ``GL_LINES``, ``GL_TRIANGLES``, etc.
See the module summary for additional information.
`group` : `Group`
Group of the vertex list, or ``None`` if no group is required.
`indices` : sequence
Sequence of integers giving indices into the vertex list.
`data` : data items
Attribute formats and initial data for the vertex list. See
the module summary for details.
:rtype: `IndexedVertexList`
'''
formats, initial_arrays = _parse_data(data)
domain = self._get_domain(True, mode, group, formats)
# Create vertex list and initialize
vlist = domain.create(count, len(indices))
start = vlist.start
vlist._set_index_data(map(lambda i: i + start, indices))
for i, array in initial_arrays:
vlist._set_attribute_data(i, array)
return vlist
def migrate(self, vertex_list, mode, group, batch):
'''Migrate a vertex list to another batch and/or group.
`vertex_list` and `mode` together identify the vertex list to migrate.
`group` and `batch` are new owners of the vertex list after migration.
The results are undefined if `mode` is not correct or if `vertex_list`
does not belong to this batch (they are not checked and will not
necessarily throw an exception immediately).
`batch` can remain unchanged if only a group change is desired.
:Parameters:
`vertex_list` : `VertexList`
A vertex list currently belonging to this batch.
`mode` : int
The current GL drawing mode of the vertex list.
`group` : `Group`
The new group to migrate to.
`batch` : `Batch`
The batch to migrate to (or the current batch).
'''
formats = vertex_list.domain.__formats
domain = batch._get_domain(False, mode, group, formats)
vertex_list.migrate(domain)
def _get_domain(self, indexed, mode, group, formats):
if group is None:
group = null_group
# Batch group
if group not in self.group_map:
self._add_group(group)
domain_map = self.group_map[group]
# Find domain given formats, indices and mode
key = (formats, mode, indexed)
try:
domain = domain_map[key]
except KeyError:
# Create domain
if indexed:
domain = vertexdomain.create_indexed_domain(*formats)
else:
domain = vertexdomain.create_domain(*formats)
domain_map[key] = domain
self._draw_list_dirty = True
return domain
def _add_group(self, group):
self.group_map[group] = {}
if group.parent is None:
self.top_groups.append(group)
else:
if group.parent not in self.group_map:
self._add_group(group.parent)
if group.parent not in self.group_children:
self.group_children[group.parent] = []
self.group_children[group.parent].append(group)
self._draw_list_dirty = True
def _update_draw_list(self):
'''Visit group tree in preorder and create a list of bound methods
to call.
'''
def visit(group):
draw_list = []
# Draw domains using this group
domain_map = self.group_map[group]
for (formats, mode, indexed), domain in list(domain_map.items()):
# Remove unused domains from batch
if domain._is_empty():
del domain_map[(formats, mode, indexed)]
continue
draw_list.append(
(lambda d, m: lambda: d.draw(m))(domain, mode))
# Sort and visit child groups of this group
children = self.group_children.get(group)
if children:
children.sort()
for child in list(children):
draw_list.extend(visit(child))
if children or domain_map:
return [group.set_state] + draw_list + [group.unset_state]
else:
# Remove unused group from batch
del self.group_map[group]
if group.parent:
self.group_children[group.parent].remove(group)
try:
del self.group_children[group]
except KeyError:
pass
try:
self.top_groups.remove(group)
except ValueError:
pass
return []
self._draw_list = []
self.top_groups.sort()
for group in list(self.top_groups):
self._draw_list.extend(visit(group))
self._draw_list_dirty = False
if _debug_graphics_batch:
self._dump_draw_list()
def _dump_draw_list(self):
def dump(group, indent=''):
print(indent, 'Begin group', group)
domain_map = self.group_map[group]
for _, domain in domain_map.items():
print(indent, ' ', domain)
for start, size in zip(*domain.allocator.get_allocated_regions()):
print(indent, ' ', 'Region %d size %d:' % (start, size))
for key, attribute in domain.attribute_names.items():
print(indent, ' ', end=' ')
try:
region = attribute.get_region(attribute.buffer,
start, size)
print(key, region.array[:])
except:
print(key, '(unmappable)')
for child in self.group_children.get(group, ()):
dump(child, indent + ' ')
print(indent, 'End group', group)
print('Draw list for %r:' % self)
for group in self.top_groups:
dump(group)
def draw(self):
'''Draw the batch.
'''
if self._draw_list_dirty:
self._update_draw_list()
for func in self._draw_list:
func()
def draw_subset(self, vertex_lists):
'''Draw only some vertex lists in the batch.
The use of this method is highly discouraged, as it is quite
inefficient. Usually an application can be redesigned so that batches
can always be drawn in their entirety, using `draw`.
The given vertex lists must belong to this batch; behaviour is
undefined if this condition is not met.
:Parameters:
`vertex_lists` : sequence of `VertexList` or `IndexedVertexList`
Vertex lists to draw.
'''
# Horrendously inefficient.
def visit(group):
group.set_state()
# Draw domains using this group
domain_map = self.group_map[group]
for (_, mode, _), domain in domain_map.items():
for list in vertex_lists:
if list.domain is domain:
list.draw(mode)
# Sort and visit child groups of this group
children = self.group_children.get(group)
if children:
children.sort()
for child in children:
visit(child)
group.unset_state()
self.top_groups.sort()
for group in self.top_groups:
visit(group)
class Group(object):
'''Group of common OpenGL state.
Before a vertex list is rendered, its group's OpenGL state is set; as are
that state's ancestors' states. This can be defined arbitrarily on
subclasses; the default state change has no effect, and groups vertex
lists only in the order in which they are drawn.
'''
def __init__(self, parent=None):
'''Create a group.
:Parameters:
`parent` : `Group`
Group to contain this group; its state will be set before this
state's.
'''
self.parent = parent
def set_state(self):
'''Apply the OpenGL state change.
The default implementation does nothing.'''
pass
def unset_state(self):
'''Repeal the OpenGL state change.
The default implementation does nothing.'''
pass
def set_state_recursive(self):
'''Set this group and its ancestry.
Call this method if you are using a group in isolation: the
parent groups will be called in top-down order, with this class's
`set` being called last.
'''
if self.parent:
self.parent.set_state_recursive()
self.set_state()
def unset_state_recursive(self):
'''Unset this group and its ancestry.
The inverse of `set_state_recursive`.
'''
self.unset_state()
if self.parent:
self.parent.unset_state_recursive()
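# A minimal custom group sketch (illustrative, not part of the original
# module): enable alpha blending around everything drawn in the group.
# class BlendGroup(Group):
#     def set_state(self):
#         glEnable(GL_BLEND)
#         glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
#     def unset_state(self):
#         glDisable(GL_BLEND)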
class NullGroup(Group):
'''The default group class used when ``None`` is given to a batch.
This implementation has no effect.
'''
pass
#: The default group.
#:
#: :type: `Group`
null_group = NullGroup()
class TextureGroup(Group):
'''A group that enables and binds a texture.
Texture groups are equal if their textures' targets and names are equal.
'''
# Don't use this, create your own group classes that are more specific.
# This is just an example.
def __init__(self, texture, parent=None):
'''Create a texture group.
:Parameters:
`texture` : `Texture`
Texture to bind.
`parent` : `Group`
Parent group.
'''
super(TextureGroup, self).__init__(parent)
self.texture = texture
def set_state(self):
glEnable(self.texture.target)
glBindTexture(self.texture.target, self.texture.id)
def unset_state(self):
glDisable(self.texture.target)
def __hash__(self):
return hash((self.texture.target, self.texture.id, self.parent))
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.texture.target == other.texture.target and
self.texture.id == other.texture.id and
self.parent == other.parent)
def __repr__(self):
return '%s(id=%d)' % (self.__class__.__name__, self.texture.id)
class OrderedGroup(Group):
'''A group with partial order.
Ordered groups with a common parent are rendered in ascending order of
their ``order`` field. This is a useful way to render multiple layers of
a scene within a single batch.
'''
# This can be useful as a top-level group, or as a superclass for other
# groups that need to be ordered.
#
# As a top-level group it's useful because graphics can be composited in a
# known order even if they don't know about each other or share any known
# group.
def __init__(self, order, parent=None):
'''Create an ordered group.
:Parameters:
`order` : int
Order of this group.
`parent` : `Group`
Parent of this group.
'''
super(OrderedGroup, self).__init__(parent)
self.order = order
def __cmp__(self, other):
if isinstance(other, OrderedGroup):
return cmp(self.order, other.order)
return -1
def __eq__(self, other):
return (self.__class__ is other.__class__ and
self.order == other.order and
self.parent == other.parent)
def __hash__(self):
return hash((self.order, self.parent))
def __repr__(self):
return '%s(%d)' % (self.__class__.__name__, self.order)
|
mattpap/sympy-polys
|
sympy/thirdparty/pyglet/pyglet/graphics/__init__.py
|
Python
|
bsd-3-clause
| 25,420
|
[
"VisIt"
] |
2580bc3b0951beb4901d985af73dfea0a4830157a094845654548839c6e4d8f5
|
from types import ModuleType
from typing import List
from cypyler import TMPCypyler
from jinja2 import Template
from .TemplateManager import TemplateManagerBase
from .Context import Context
from .Expression import Expression
NODEGROUP_TEMPLATE = ("{% for node in node_templates %}"
"{{ node }}"
"{% endfor %}")
class TreeBuild:
"""Class responsible for processing a DSL tree and consolidating the generated Cython code.
Parameters:
template_manager: A template manager object.
libraries: A list containing any libraries that should be statically linked.
include_dirs: A list containing extra include directories.
`Cypyler` adds numpy includes by default.
"""
build_dir_prefix = 'elliptic__'
def __init__(self,
template_manager: TemplateManagerBase,
libraries: List[str]=None,
include_dirs: List[str]=None) -> None:
self.template_manager = template_manager
self.libraries = libraries
self.include_dirs = include_dirs
self.built_module: ModuleType = None
def build(self, root: Expression) -> ModuleType:
"""Processes the DSL tree and returns the built Cython module.
Parameters:
root: The DSL tree root.
"""
full_rendered_template = self._render_tree(node=root, context=Context())
cp = TMPCypyler(self.build_dir_prefix, self.libraries, self.include_dirs)
self.built_module = cp.build(full_rendered_template)
return self.built_module
def _render_tree(self, node: Expression, context: Context) -> str:
children_rendered_templates: List[str] = []
with node.visit(context) as context_delegate:
child: Expression
for child in node.children:
built_node: str = self._render_tree(child, context)
children_rendered_templates.append(built_node)
group_template = Template(NODEGROUP_TEMPLATE)
rendered_group = group_template.render(node_templates=children_rendered_templates)
rendered_node = node.render(self.template_manager,
rendered_group,
context_delegate)
context_delegate.child = rendered_group
return rendered_node
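# Hedged usage sketch (my_template_manager and dsl_root are hypothetical
# objects built elsewhere):
# builder = TreeBuild(template_manager=my_template_manager)
# module = builder.build(dsl_root)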
|
padmec-reservoir/ELLIPTIc
|
elliptic/Kernel/TreeBuilder.py
|
Python
|
mit
| 2,437
|
[
"VisIt"
] |
195310eea6888ca5b2cda2a047fad239f63ce1a338b21a826dd77ef5b4041ede
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import time
import zmq
from zmq.tests import BaseZMQTestCase
try:
from queue import Queue
except ImportError:
from Queue import Queue
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class TestSocket(BaseZMQTestCase):
def test_create(self):
ctx = zmq.Context()
s = ctx.socket(zmq.PUB)
# Superluminal protocol not yet implemented
self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.bind, 'ftl://')
self.assertRaisesErrno(zmq.EPROTONOSUPPORT, s.connect, 'ftl://')
s.close()
del ctx
def test_unicode_sockopts(self):
"""test setting/getting sockopts with unicode strings"""
topic = u"tést"
p,s = self.create_bound_pair(zmq.PUB, zmq.SUB)
self.assertEquals(s.send_unicode, s.send_unicode)
self.assertEquals(p.recv_unicode, p.recv_unicode)
self.assertRaises(TypeError, s.setsockopt, zmq.SUBSCRIBE, topic)
self.assertRaises(TypeError, s.setsockopt, zmq.IDENTITY, topic)
self.assertRaises(TypeError, s.setsockopt, zmq.AFFINITY, topic)
s.setsockopt_unicode(zmq.SUBSCRIBE, topic)
s.setsockopt_unicode(zmq.IDENTITY, topic, 'utf16')
self.assertRaises(TypeError, s.getsockopt_unicode, zmq.AFFINITY)
self.assertRaises(TypeError, s.getsockopt_unicode, zmq.SUBSCRIBE)
st = s.getsockopt(zmq.IDENTITY)
self.assertEquals(st.decode('utf16'), s.getsockopt_unicode(zmq.IDENTITY, 'utf16'))
time.sleep(0.1) # wait for connection/subscription
p.send_unicode(topic,zmq.SNDMORE)
p.send_unicode(topic*2, encoding='latin-1')
self.assertEquals(topic, s.recv_unicode())
self.assertEquals(topic*2, s.recv_unicode(encoding='latin-1'))
def test_send_unicode(self):
"test sending unicode objects"
a,b = self.create_bound_pair(zmq.PAIR, zmq.PAIR)
self.sockets.extend([a,b])
self.assertEquals(a.setsockopt_unicode, a.setsockopt_unicode)
self.assertEquals(b.getsockopt_unicode, b.getsockopt_unicode)
u = u"çπ§"
self.assertRaises(TypeError, a.send, u,copy=False)
self.assertRaises(TypeError, a.send, u,copy=True)
a.send_unicode(u)
s = b.recv()
self.assertEquals(s,u.encode('utf8'))
self.assertEquals(s.decode('utf8'),u)
a.send_unicode(u,encoding='utf16')
s = b.recv_unicode(encoding='utf16')
self.assertEquals(s,u)
def test_tracker(self):
"test the MessageTracker object for tracking when zmq is done with a buffer"
addr = 'tcp://127.0.0.1'
a = self.context.socket(zmq.XREQ)
port = a.bind_to_random_port(addr)
a.close()
del a
iface = "%s:%i"%(addr,port)
a = self.context.socket(zmq.XREQ)
a.setsockopt(zmq.IDENTITY, "a")
b = self.context.socket(zmq.XREP)
self.sockets.extend([a,b])
a.connect(iface)
p1 = a.send('something', copy=False)
self.assert_(isinstance(p1, zmq.MessageTracker))
self.assertFalse(p1.done)
p2 = a.send_multipart(['something', 'else'], copy=False)
self.assert_(isinstance(p2, zmq.MessageTracker))
self.assertEquals(p2.done, False)
self.assertEquals(p1.done, False)
b.bind(iface)
msg = b.recv_multipart()
self.assertEquals(p1.done, True)
self.assertEquals(msg, ['a', 'something'])
msg = b.recv_multipart()
self.assertEquals(p2.done, True)
self.assertEquals(msg, ['a', 'something', 'else'])
m = zmq.Message("again")
self.assertEquals(m.done, False)
# print m.bytes
p1 = a.send(m, copy=False)
p2 = a.send(m, copy=False)
self.assertEquals(m.done, False)
self.assertEquals(p1.done, False)
self.assertEquals(p2.done, False)
msg = b.recv_multipart()
self.assertEquals(m.done, False)
self.assertEquals(msg, ['a', 'again'])
msg = b.recv_multipart()
self.assertEquals(m.done, False)
self.assertEquals(msg, ['a', 'again'])
self.assertEquals(p1.done, False)
self.assertEquals(p2.done, False)
pm = m.tracker
del m
time.sleep(0.1)
# q.get()
self.assertEquals(p1.done, True)
self.assertEquals(p2.done, True)
def test_close(self):
ctx = zmq.Context()
s = ctx.socket(zmq.PUB)
s.close()
self.assertRaises(zmq.ZMQError, s.bind, '')
self.assertRaises(zmq.ZMQError, s.connect, '')
self.assertRaises(zmq.ZMQError, s.setsockopt, zmq.SUBSCRIBE, '')
self.assertRaises(zmq.ZMQError, s.send, 'asdf')
self.assertRaises(zmq.ZMQError, s.recv)
del ctx
|
takluyver/pyzmq
|
zmq/tests/test_socket.py
|
Python
|
lgpl-3.0
| 5,832
|
[
"Brian"
] |
ce0c44d4109a9c74859ee9c2f0987b25be98921c8b01122e9172a1aab0b24e8d
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017, 2018, 2019 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['Chemical', 'reference_states']
from fluids.constants import epsilon_0
from fluids.core import Reynolds, Capillary, Weber, Bond, Grashof, Peclet_heat
from fluids.core import *
from fluids.numerics import newton, numpy as np
from chemicals.identifiers import *
from chemicals.phase_change import Tb, Tm, Hfus, Tb_methods, Tm_methods, Hfus_methods
from chemicals.critical import Tc, Pc, Vc, Tc_methods, Pc_methods, Vc_methods
from chemicals.acentric import omega, Stiel_polar_factor, omega_methods
from chemicals.triple import Tt, Pt, Tt_methods, Pt_methods
from chemicals.virial import B_from_Z
from chemicals.volume import ideal_gas
from chemicals.reaction import Hfg_methods, S0g_methods, Hfl_methods, Hfs_methods, Hfs, Hfl, Hfg, S0g, S0l, S0s, Gibbs_formation, Hf_basis_converter, entropy_formation
from chemicals.combustion import combustion_stoichiometry, HHV_stoichiometry, LHV_from_HHV
from chemicals.safety import T_flash, T_autoignition, LFL, UFL, TWA, STEL, Ceiling, Skin, Carcinogen, T_flash_methods, T_autoignition_methods, LFL_methods, UFL_methods, TWA_methods, STEL_methods, Ceiling_methods, Skin_methods, Carcinogen_methods
from chemicals.solubility import solubility_parameter
from chemicals.dipole import dipole_moment as dipole, dipole_moment_methods
from chemicals.utils import *
from chemicals.lennard_jones import Stockmayer_methods, molecular_diameter_methods, Stockmayer, molecular_diameter
from chemicals.environment import GWP, ODP, logP, GWP_methods, ODP_methods, logP_methods
from chemicals.refractivity import RI, RI_methods
from chemicals.elements import atom_fractions, mass_fractions, similarity_variable, atoms_to_Hill, simple_formula_parser, molecular_weight, charge_from_formula, periodic_table, homonuclear_elements
from thermo.vapor_pressure import VaporPressure, SublimationPressure
from thermo.phase_change import EnthalpyVaporization, EnthalpySublimation
from thermo.utils import identify_phase
from thermo.thermal_conductivity import ThermalConductivityLiquid, ThermalConductivityGas, ThermalConductivityLiquidMixture, ThermalConductivityGasMixture
from thermo.volume import VolumeGas, VolumeLiquid, VolumeSolid, VolumeLiquidMixture, VolumeGasMixture, VolumeSolidMixture
from thermo.permittivity import *
from thermo.heat_capacity import HeatCapacitySolid, HeatCapacityGas, HeatCapacityLiquid, HeatCapacitySolidMixture, HeatCapacityGasMixture, HeatCapacityLiquidMixture
from thermo.interface import SurfaceTension, SurfaceTensionMixture
from thermo.viscosity import ViscosityLiquid, ViscosityGas, ViscosityLiquidMixture, ViscosityGasMixture
from thermo.utils import *
from thermo.law import legal_status, economic_status
from thermo.electrochem import conductivity, conductivity_methods
from thermo.eos import *
from thermo.eos_mix import *
from thermo.unifac import DDBST_UNIFAC_assignments, DDBST_MODIFIED_UNIFAC_assignments, DDBST_PSRK_assignments, load_group_assignments_DDBST, UNIFAC_RQ, Van_der_Waals_volume, Van_der_Waals_area
caching = True
# Format: (T, P, phase, H, S, molar=True)
IAPWS = (273.16, 611.655, 'l', 0.00922, 0, True) # Water; had to convert Href from mass to molar
ASHRAE = (233.15, 'Psat', 'l', 0, 0, True) # As described in REFPROP
IIR = (273.15, 'Psat', 'l', 200E3, 1000, False) # 200 kj/kg reference, as described in REFPROP
REFPROP = ('Tb', 101325, 'l', 0, 0, True)
CHEMSEP = (298., 101325, 'g', 0, 0, True) # It has an option to add Hf to the reference
PRO_II = (298.15, 101325, 'gas', 0, 0, True)
HYSYS = (298.15, 101325, 'calc', 'Hf', 0, True)
UNISIM = HYSYS
SUPERPRO = (298.15, 101325, 'calc', 0, 0, True) # No support for entropy found, 0 assumed
# note specifying a phase works for chemicals but not mixtures.
reference_states = [IAPWS, ASHRAE, IIR, REFPROP, CHEMSEP, PRO_II, HYSYS,
UNISIM, SUPERPRO]
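# An additional reference state can be defined in the same
# (T, P, phase, H, S, molar) format; a hypothetical example:
# MY_REF = (298.15, 101325, 'l', 0, 0, True)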
class ChemicalConstants(object):
__slots__ = ('CAS', 'Tc', 'Pc', 'Vc', 'omega', 'Tb', 'Tm', 'Tt', 'Pt',
'Hfus', 'Hsub', 'Hf', 'dipole',
'HeatCapacityGas', 'HeatCapacityLiquid', 'HeatCapacitySolid',
'ThermalConductivityLiquid', 'ThermalConductivityGas',
'ViscosityLiquid', 'ViscosityGas',
'EnthalpyVaporization', 'VaporPressure', 'VolumeLiquid',
'EnthalpySublimation', 'SublimationPressure', 'SurfaceTension',
'VolumeSolid',
'VolumeSupercriticalLiquid', 'PermittivityLiquid',
)
# Or can I store the actual objects without doing the searches?
def __init__(self, CAS, Tc=None, Pc=None, Vc=None, omega=None, Tb=None,
Tm=None, Tt=None, Pt=None, Hfus=None, Hsub=None, Hf=None,
dipole=None,
HeatCapacityGas=(), HeatCapacityLiquid=(),
HeatCapacitySolid=(),
ThermalConductivityLiquid=(), ThermalConductivityGas=(),
ViscosityLiquid=(), ViscosityGas=(),
EnthalpyVaporization=(), VaporPressure=(), VolumeLiquid=(),
SublimationPressure=(), EnthalpySublimation=(),
SurfaceTension=(), VolumeSolid=(), VolumeSupercriticalLiquid=(),
PermittivityLiquid=(),
):
self.CAS = CAS
self.Tc = Tc
self.Pc = Pc
self.Vc = Vc
self.omega = omega
self.Tb = Tb
self.Tm = Tm
self.Tt = Tt
self.Pt = Pt
self.Hfus = Hfus
self.Hsub = Hsub
self.Hf = Hf
self.dipole = dipole
self.HeatCapacityGas = HeatCapacityGas
self.HeatCapacityLiquid = HeatCapacityLiquid
self.HeatCapacitySolid = HeatCapacitySolid
self.ThermalConductivityLiquid = ThermalConductivityLiquid
self.ThermalConductivityGas = ThermalConductivityGas
self.ViscosityLiquid = ViscosityLiquid
self.ViscosityGas = ViscosityGas
self.EnthalpyVaporization = EnthalpyVaporization
self.EnthalpySublimation = EnthalpySublimation
self.VaporPressure = VaporPressure
self.SublimationPressure = SublimationPressure
self.VolumeLiquid = VolumeLiquid
self.SurfaceTension = SurfaceTension
self.VolumeSolid = VolumeSolid
self.VolumeSupercriticalLiquid = VolumeSupercriticalLiquid
self.PermittivityLiquid = PermittivityLiquid
empty_chemical_constants = ChemicalConstants(None)
_chemical_cache = {}
property_lock = False
def lock_properties(status):
global property_lock
global _chemical_cache
if property_lock == status:
return True
else:
_chemical_cache.clear()
property_lock = status
return True
def get_chemical_constants(CAS, key):
global property_lock
if not property_lock:
return None
from thermo.database import loaded_chemicals
try:
vs = getattr(loaded_chemicals[CAS], key)
if all(i is not None for i in vs):
return vs
# Tmin, Tmax, coeffs = getattr(loaded_chemicals[CAS], key)
# if Tmin is not None and Tmax is not None and coeffs is not None:
# return (Tmin, Tmax, coeffs)
return None
except KeyError:
return None
class Chemical(object): # pragma: no cover
'''Creates a Chemical object which contains basic information such as
molecular weight and the structure of the species, as well as thermodynamic
and transport properties as a function of temperature and pressure.
Parameters
----------
ID : str
One of the following [-]:
* Name, in IUPAC form or common form or a synonym registered in PubChem
* InChI name, prefixed by 'InChI=1S/' or 'InChI=1/'
* InChI key, prefixed by 'InChIKey='
* PubChem CID, prefixed by 'PubChem='
* SMILES (prefix with 'SMILES=' to ensure smiles parsing)
* CAS number
T : float, optional
Temperature of the chemical (default 298.15 K), [K]
P : float, optional
Pressure of the chemical (default 101325 Pa) [Pa]
Examples
--------
Creating chemical objects:
>>> Chemical('hexane')
<Chemical [hexane], T=298.15 K, P=101325 Pa>
>>> Chemical('CCCCCCCC', T=500, P=1E7)
<Chemical [octane], T=500.00 K, P=10000000 Pa>
>>> Chemical('7440-36-0', P=1000)
<Chemical [antimony], T=298.15 K, P=1000 Pa>
Getting basic properties:
>>> N2 = Chemical('Nitrogen')
>>> N2.Tm, N2.Tb, N2.Tc # melting, boiling, and critical points [K]
(63.15, 77.355, 126.2)
    >>> N2.Pt, N2.Pc # triple-point and critical pressure [Pa]
(12526.9697368421, 3394387.5)
>>> N2.CAS, N2.formula, N2.InChI, N2.smiles, N2.atoms # CAS number, formula, InChI string, smiles string, dictionary of atomic elements and their count
('7727-37-9', 'N2', 'N2/c1-2', 'N#N', {'N': 2})
    Changing the T/P of the chemical, and getting temperature-dependent
    properties:
>>> N2.Cp, N2.rho, N2.mu # Heat capacity [J/kg/K], density [kg/m^3], viscosity [Pa*s]
(1039.4978324480921, 1.1452416223829405, 1.7804740647270688e-05)
>>> N2.calculate(T=65, P=1E6) # set it to a liquid at 65 K and 1 MPa
>>> N2.phase
'l'
>>> N2.Cp, N2.rho, N2.mu # properties are now of the liquid phase
(2002.8819854804037, 861.3539919443364, 0.0002857739143670701)
Molar units are also available for properties:
>>> N2.Cpm, N2.Vm, N2.Hvapm # heat capacity [J/mol/K], molar volume [m^3/mol], enthalpy of vaporization [J/mol]
(56.10753421205674, 3.252251717875631e-05, 5982.710998291719)
    A great many properties are available; for a complete list, see the
    Attributes section below.
    >>> N2.alpha, N2.JT # thermal diffusivity [m^2/s], Joule-Thomson coefficient [K/Pa]
(9.874883993253272e-08, -4.0009932695519242e-07)
>>> N2.isentropic_exponent, N2.isobaric_expansion
(1.4000000000000001, 0.0047654228408661571)
For pure species, the phase is easily identified, allowing for properties
to be obtained without needing to specify the phase. However, the
properties are also available in the hypothetical gas phase (when under the
boiling point) and in the hypothetical liquid phase (when above the boiling
point) as these properties are needed to evaluate mixture properties.
Specify the phase of a property to be retrieved by appending 'l' or 'g' or
's' to the property.
>>> tol = Chemical('toluene')
>>> tol.rhog, tol.Cpg, tol.kg, tol.mug
(4.241646701894199, 1126.5533755283168, 0.00941385692301755, 6.973325939594919e-06)
Temperature dependent properties are calculated by objects which provide
many useful features related to the properties. To determine the
temperature at which nitrogen has a saturation pressure of 1 MPa:
>>> N2.VaporPressure.solve_property(1E6)
103.73528598652341
    An integral of the ideal-gas heat capacity of nitrogen can be computed
    to determine the enthalpy required for a given change in temperature.
    Note that the thermodynamic objects always calculate values in molar units.
    >>> N2.HeatCapacityGas.T_dependent_property_integral(100, 120) # J/mol
582.0121860897898
    Derivatives of properties can be calculated as well, as may be needed
    by, for example, heat transfer calculations:
>>> N2.SurfaceTension.T_dependent_property_derivative(77)
-0.00022695346296730534
If a property is needed at multiple temperatures or pressures, it is faster
to use the object directly to perform the calculation rather than setting
the conditions for the chemical.
>>> [N2.VaporPressure(T) for T in range(80, 120, 10)]
[136979.4840843189, 360712.5746603142, 778846.276691705, 1466996.7208525643]
These objects are also how the methods by which the properties are
calculated can be changed. To see the available methods for a property:
>>> N2.VaporPressure.all_methods
set(['VDI_PPDS', 'BOILING_CRITICAL', 'WAGNER_MCGARRY', 'AMBROSE_WALTON', 'COOLPROP', 'LEE_KESLER_PSAT', 'EOS', 'ANTOINE_POLING', 'SANJARI', 'DIPPR_PERRY_8E', 'Edalat', 'WAGNER_POLING'])
    A specific method can be selected for the calculation of a property.
    In the example below, the Lee-Kesler correlation for vapor pressure is
    specified.
>>> N2.calculate(80)
>>> N2.Psat
136979.4840843189
>>> N2.VaporPressure.method = 'LEE_KESLER_PSAT'
>>> N2.Psat
134987.76815364443
For memory reduction, these objects are shared by all chemicals which are
the same; new instances will use the same specified methods.
>>> N2_2 = Chemical('nitrogen')
>>> N2_2.VaporPressure.user_methods
['LEE_KESLER_PSAT']
To disable this behavior, set thermo.chemical.caching to False.
>>> import thermo
>>> thermo.chemical.caching = False
>>> N2_3 = Chemical('nitrogen')
>>> N2_3.VaporPressure.user_methods
[]
Properties may also be plotted via these objects:
>>> N2.VaporPressure.plot_T_dependent_property() # doctest: +SKIP
>>> N2.VolumeLiquid.plot_isotherm(T=77, Pmin=1E5, Pmax=1E7) # doctest: +SKIP
>>> N2.VolumeLiquid.plot_isobar(P=1E6, Tmin=66, Tmax=120) # doctest: +SKIP
>>> N2.VolumeLiquid.plot_TP_dependent_property(Tmin=60, Tmax=100, Pmin=1E5, Pmax=1E7) # doctest: +SKIP
Notes
-----
.. warning::
The Chemical class is not designed for high-performance or the ability
to use different thermodynamic models. It is especially limited in its
multiphase support and the ability to solve with specifications other
than temperature and pressure. It is impossible to change constant
properties such as a compound's critical temperature in this interface.
It is recommended to switch over to the :obj:`thermo.flash` interface
which solves those problems and is better positioned to grow. That
interface also requires users to be responsible for their chemical
constants and pure component correlations; while default values can
easily be loaded for most compounds, the user is ultimately responsible
for them.
Attributes
----------
T : float
Temperature of the chemical, [K]
P : float
Pressure of the chemical, [Pa]
phase : str
Phase of the chemical; one of 's', 'l', 'g', or 'l/g'.
ID : str
User specified string by which the chemical's CAS was looked up.
CAS : str
The CAS number of the chemical.
PubChem : int
PubChem Compound identifier (CID) of the chemical; all chemicals are
sourced from their database. Chemicals can be looked at online at
`<https://pubchem.ncbi.nlm.nih.gov>`_.
MW : float
Molecular weight of the compound, [g/mol]
formula : str
Molecular formula of the compound.
atoms : dict
dictionary of counts of individual atoms, indexed by symbol with
proper capitalization, [-]
similarity_variable : float
Similarity variable, see :obj:`chemicals.elements.similarity_variable`
for the definition, [mol/g]
smiles : str
Simplified molecular-input line-entry system representation of the
compound.
InChI : str
IUPAC International Chemical Identifier of the compound.
InChI_Key : str
        25-character hash of the compound's InChI (27 characters including
        the two hyphens).
IUPAC_name : str
Preferred IUPAC name for a compound.
synonyms : list of strings
All synonyms for the compound found in PubChem, sorted by popularity.
Tm : float
Melting temperature [K]
Tb : float
Boiling temperature [K]
Tc : float
Critical temperature [K]
Pc : float
Critical pressure [Pa]
Vc : float
Critical volume [m^3/mol]
Zc : float
Critical compressibility [-]
rhoc : float
Critical density [kg/m^3]
rhocm : float
Critical molar density [mol/m^3]
omega : float
Acentric factor [-]
StielPolar : float
Stiel Polar factor, see :obj:`chemicals.acentric.Stiel_polar_factor` for
the definition [-]
Tt : float
Triple temperature, [K]
Pt : float
Triple pressure, [Pa]
Hfus : float
Enthalpy of fusion [J/kg]
Hfusm : float
Molar enthalpy of fusion [J/mol]
Hsub : float
Enthalpy of sublimation [J/kg]
Hsubm : float
Molar enthalpy of sublimation [J/mol]
Hfm : float
Standard state molar enthalpy of formation, [J/mol]
Hf : float
Standard enthalpy of formation in a mass basis, [J/kg]
Hfgm : float
Ideal-gas molar enthalpy of formation, [J/mol]
Hfg : float
Ideal-gas enthalpy of formation in a mass basis, [J/kg]
Hcm : float
Molar higher heat of combustion [J/mol]
Hc : float
Higher Heat of combustion [J/kg]
Hcm_lower : float
Molar lower heat of combustion [J/mol]
Hc_lower : float
Lower Heat of combustion [J/kg]
S0m : float
Standard state absolute molar entropy of the chemical, [J/mol/K]
S0 : float
Standard state absolute entropy of the chemical, [J/kg/K]
S0gm : float
Absolute molar entropy in an ideal gas state of the chemical, [J/mol/K]
S0g : float
Absolute mass entropy in an ideal gas state of the chemical, [J/kg/K]
Gfm : float
Standard state molar change of Gibbs energy of formation [J/mol]
Gf : float
Standard state change of Gibbs energy of formation [J/kg]
Gfgm : float
Ideal-gas molar change of Gibbs energy of formation [J/mol]
Gfg : float
Ideal-gas change of Gibbs energy of formation [J/kg]
Sfm : float
Standard state molar change of entropy of formation, [J/mol/K]
Sf : float
Standard state change of entropy of formation, [J/kg/K]
Sfgm : float
Ideal-gas molar change of entropy of formation, [J/mol/K]
Sfg : float
Ideal-gas change of entropy of formation, [J/kg/K]
Hcgm : float
Higher molar heat of combustion of the chemical in the ideal gas state,
[J/mol]
Hcg : float
Higher heat of combustion of the chemical in the ideal gas state,
[J/kg]
Hcgm_lower : float
Lower molar heat of combustion of the chemical in the ideal gas state,
[J/mol]
Hcg_lower : float
Lower heat of combustion of the chemical in the ideal gas state,
[J/kg]
Tflash : float
Flash point of the chemical, [K]
Tautoignition : float
Autoignition point of the chemical, [K]
LFL : float
Lower flammability limit of the gas in an atmosphere at STP, mole
fraction [-]
UFL : float
Upper flammability limit of the gas in an atmosphere at STP, mole
fraction [-]
TWA : tuple[quantity, unit]
Time-Weighted Average limit on worker exposure to dangerous chemicals.
STEL : tuple[quantity, unit]
Short-term Exposure limit on worker exposure to dangerous chemicals.
Ceiling : tuple[quantity, unit]
Ceiling limits on worker exposure to dangerous chemicals.
Skin : bool
Whether or not a chemical can be absorbed through the skin.
Carcinogen : str or dict
Carcinogen status information.
dipole : float
        Dipole moment in debye, [1 debye = 3.33564095198e-30 C*m]
Stockmayer : float
Lennard-Jones depth of potential-energy minimum over k, [K]
molecular_diameter : float
Lennard-Jones molecular diameter, [angstrom]
GWP : float
Global warming potential (default 100-year outlook) (impact/mass
chemical)/(impact/mass CO2), [-]
ODP : float
Ozone Depletion potential (impact/mass chemical)/(impact/mass CFC-11),
[-]
logP : float
Octanol-water partition coefficient, [-]
legal_status : str or dict
Legal status information [-]
economic_status : list
Economic status information [-]
RI : float
Refractive Index on the Na D line, [-]
RIT : float
        Temperature at which the refractive index reading was made, [K]
conductivity : float
Electrical conductivity of the fluid, [S/m]
conductivityT : float
        Temperature at which the conductivity measurement was made, [K]
VaporPressure : object
Instance of :obj:`thermo.vapor_pressure.VaporPressure`, with data and
methods loaded for the chemical; performs the actual calculations of
vapor pressure of the chemical.
EnthalpyVaporization : object
Instance of :obj:`thermo.phase_change.EnthalpyVaporization`, with data
and methods loaded for the chemical; performs the actual calculations
of molar enthalpy of vaporization of the chemical.
VolumeSolid : object
Instance of :obj:`thermo.volume.VolumeSolid`, with data and methods
loaded for the chemical; performs the actual calculations of molar
volume of the solid phase of the chemical.
VolumeLiquid : object
Instance of :obj:`thermo.volume.VolumeLiquid`, with data and methods
loaded for the chemical; performs the actual calculations of molar
volume of the liquid phase of the chemical.
VolumeGas : object
Instance of :obj:`thermo.volume.VolumeGas`, with data and methods
loaded for the chemical; performs the actual calculations of molar
volume of the gas phase of the chemical.
HeatCapacitySolid : object
Instance of :obj:`thermo.heat_capacity.HeatCapacitySolid`, with data and
methods loaded for the chemical; performs the actual calculations of
molar heat capacity of the solid phase of the chemical.
HeatCapacityLiquid : object
Instance of :obj:`thermo.heat_capacity.HeatCapacityLiquid`, with data and
methods loaded for the chemical; performs the actual calculations of
molar heat capacity of the liquid phase of the chemical.
HeatCapacityGas : object
Instance of :obj:`thermo.heat_capacity.HeatCapacityGas`, with data and
methods loaded for the chemical; performs the actual calculations of
molar heat capacity of the gas phase of the chemical.
ViscosityLiquid : object
Instance of :obj:`thermo.viscosity.ViscosityLiquid`, with data and
methods loaded for the chemical; performs the actual calculations of
viscosity of the liquid phase of the chemical.
ViscosityGas : object
Instance of :obj:`thermo.viscosity.ViscosityGas`, with data and
methods loaded for the chemical; performs the actual calculations of
viscosity of the gas phase of the chemical.
ThermalConductivityLiquid : object
Instance of :obj:`thermo.thermal_conductivity.ThermalConductivityLiquid`,
with data and methods loaded for the chemical; performs the actual
calculations of thermal conductivity of the liquid phase of the
chemical.
ThermalConductivityGas : object
Instance of :obj:`thermo.thermal_conductivity.ThermalConductivityGas`,
with data and methods loaded for the chemical; performs the actual
calculations of thermal conductivity of the gas phase of the chemical.
SurfaceTension : object
Instance of :obj:`thermo.interface.SurfaceTension`, with data and
methods loaded for the chemical; performs the actual calculations of
surface tension of the chemical.
Permittivity : object
Instance of :obj:`thermo.permittivity.PermittivityLiquid`, with data and
methods loaded for the chemical; performs the actual calculations of
permittivity of the chemical.
Psat_298 : float
Vapor pressure of the chemical at 298.15 K, [Pa]
phase_STP : str
Phase of the chemical at 298.15 K and 101325 Pa; one of 's', 'l', 'g',
or 'l/g'.
Vml_Tb : float
Molar volume of liquid phase at the normal boiling point [m^3/mol]
Vml_Tm : float
Molar volume of liquid phase at the melting point [m^3/mol]
Vml_STP : float
Molar volume of liquid phase at 298.15 K and 101325 Pa [m^3/mol]
rhoml_STP : float
Molar density of liquid phase at 298.15 K and 101325 Pa [mol/m^3]
Vmg_STP : float
Molar volume of gas phase at 298.15 K and 101325 Pa according to
the ideal gas law, [m^3/mol]
Vms_Tm : float
Molar volume of solid phase at the melting point [m^3/mol]
rhos_Tm : float
Mass density of solid phase at the melting point [kg/m^3]
Hvap_Tbm : float
Molar enthalpy of vaporization at the normal boiling point [J/mol]
Hvap_Tb : float
Mass enthalpy of vaporization at the normal boiling point [J/kg]
Hvapm_298 : float
Molar enthalpy of vaporization at 298.15 K [J/mol]
Hvap_298 : float
Mass enthalpy of vaporization at 298.15 K [J/kg]
alpha
alphag
alphal
API
aromatic_rings
atom_fractions
Bvirial
charge
Cp
Cpg
Cpgm
Cpl
Cplm
Cpm
Cps
Cpsm
Cvg
Cvgm
eos
Hill
Hvap
Hvapm
isentropic_exponent
isobaric_expansion
isobaric_expansion_g
isobaric_expansion_l
JT
JTg
JTl
k
kg
kl
mass_fractions
mu
mug
mul
nu
nug
nul
Parachor
permittivity
Poynting
Pr
Prg
Prl
Psat
PSRK_groups
rdkitmol
rdkitmol_Hs
rho
rhog
rhogm
rhol
rholm
rhom
rhos
rhosm
rings
SG
SGg
SGl
SGs
sigma
solubility_parameter
UNIFAC_Dortmund_groups
UNIFAC_groups
UNIFAC_R
UNIFAC_Q
Van_der_Waals_area
Van_der_Waals_volume
Vm
Vmg
Vml
Vms
Z
Zg
Zl
Zs
'''
__atom_fractions = None
__mass_fractions = None
__UNIFAC_groups = None
__UNIFAC_Dortmund_groups = None
__PSRK_groups = None
__rdkitmol = None
__rdkitmol_Hs = None
__Hill = None
__legal_status = None
__economic_status = None
def __repr__(self):
        return '<Chemical [%s], T=%.2f K, P=%.0f Pa>' % (self.name, self.T, self.P)
def __init__(self, ID, T=298.15, P=101325, autocalc=True):
if isinstance(ID, dict):
self.CAS = ID['CASRN']
self.ID = self.name = ID['name']
self.formula = ID['formula']
# DO NOT REMOVE molecular_weight until the database gets updated with consistent MWs
self.MW = ID['MW'] if 'MW' in ID else molecular_weight(simple_formula_parser(self.formula))
self.PubChem = ID['PubChem'] if 'PubChem' in ID else None
self.smiles = ID['smiles'] if 'smiles' in ID else None
self.InChI = ID['InChI'] if 'InChI' in ID else None
self.InChI_Key = ID['InChI_Key'] if 'InChI_Key' in ID else None
self.synonyms = ID['synonyms'] if 'synonyms' in ID else None
else:
self.ID = ID
# Identification
self.ChemicalMetadata = search_chemical(ID)
self.CAS = self.ChemicalMetadata.CASs
if self.CAS in _chemical_cache and caching:
self.__dict__.update(_chemical_cache[self.CAS].__dict__)
self.autocalc = autocalc
self.calculate(T, P)
else:
self.autocalc = autocalc
if not isinstance(ID, dict):
self.PubChem = self.ChemicalMetadata.pubchemid
self.formula = self.ChemicalMetadata.formula
self.MW = molecular_weight(simple_formula_parser(self.formula)) # self.ChemicalMetadata.MW
self.smiles = self.ChemicalMetadata.smiles
self.InChI = self.ChemicalMetadata.InChI
self.InChI_Key = self.ChemicalMetadata.InChI_key
self.IUPAC_name = self.ChemicalMetadata.iupac_name.lower()
self.name = self.ChemicalMetadata.common_name.lower()
self.synonyms = self.ChemicalMetadata.synonyms
self.atoms = simple_formula_parser(self.formula)
self.similarity_variable = similarity_variable(self.atoms, self.MW)
self.eos_in_a_box = []
self.set_constant_sources()
self.set_constants()
self.set_eos(T=T, P=P)
self.set_TP_sources()
if self.autocalc:
self.set_ref()
self.calculate(T, P)
if len(_chemical_cache) < 1000:
_chemical_cache[self.CAS] = self
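    # Example (sketch): passing a metadata dict instead of an identifier
    # string skips the database search; the keys mirror the dict branch at
    # the top of __init__, and omitted optional keys default to None.
    # Values here are illustrative.
    #
    # >>> Chemical({'CASRN': '7732-18-5', 'name': 'water',
    # ...           'formula': 'H2O', 'MW': 18.01528})  # doctest: +SKIP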
def calculate(self, T=None, P=None):
if (hasattr(self, 'T') and T == self.T and hasattr(self, 'P') and P == self.P):
return None
if T:
if T < 0:
raise ValueError('Negative value specified for Chemical temperature - aborting!')
self.T = T
if P:
if P < 0:
raise ValueError('Negative value specified for Chemical pressure - aborting!')
self.P = P
if self.autocalc:
self.phase = identify_phase(T=self.T, P=self.P, Tm=self.Tm, Tb=self.Tb, Tc=self.Tc, Psat=self.Psat)
self.eos = self.eos.to_TP(T=self.T, P=self.P)
self.eos_in_a_box[0] = self.eos
self.set_thermo()
def draw_2d(self, width=300, height=300, Hs=False): # pragma: no cover
r'''Interface for drawing a 2D image of the molecule.
        Requires an HTML5 browser, and the libraries RDKit and
        IPython. If either library is absent, a message string is returned
        instead of the image.
Parameters
----------
width : int
Number of pixels wide for the view
height : int
Number of pixels tall for the view
Hs : bool
Whether or not to show hydrogen
Examples
--------
>>> Chemical('decane').draw_2d() # doctest: +ELLIPSIS
<PIL.Image.Image image mode=RGBA size=300x300 at 0x...>
'''
try:
from rdkit.Chem import Draw
from rdkit.Chem.Draw import IPythonConsole
if Hs:
mol = self.rdkitmol_Hs
else:
mol = self.rdkitmol
return Draw.MolToImage(mol, size=(width, height))
except:
            return 'RDKit is required for this feature.'
def draw_3d(self, width=300, height=500, style='stick', Hs=True,
atom_labels=True): # pragma: no cover
r'''Interface for drawing an interactive 3D view of the molecule.
        Requires an HTML5 browser, and the libraries RDKit, py3Dmol, and
        IPython. If any of these libraries is absent, a message string is
        returned instead of the view.
Parameters
----------
width : int
Number of pixels wide for the view, [pixels]
height : int
Number of pixels tall for the view, [pixels]
style : str
One of 'stick', 'line', 'cross', or 'sphere', [-]
Hs : bool
Whether or not to show hydrogen, [-]
atom_labels : bool
Whether or not to label the atoms, [-]
Examples
--------
>>> Chemical('cubane').draw_3d()
<IPython.core.display.HTML object>
'''
try:
from rdkit import Chem
from rdkit.Chem import AllChem
import py3Dmol
from IPython.display import display
if Hs:
mol = self.rdkitmol_Hs
else:
mol = self.rdkitmol
AllChem.EmbedMultipleConfs(mol)
mb = Chem.MolToMolBlock(mol)
p = py3Dmol.view(width=width, height=height)
p.addModel(mb,'sdf')
p.setStyle({style:{}})
if atom_labels:
p.addPropertyLabels("atom","",{'alignment': 'center'})
p.zoomTo()
display(p.show())
except:
return 'py3Dmol, RDKit, and IPython are required for this feature.'
def set_constant_sources(self):
self.Tm_sources = Tm_methods(CASRN=self.CAS)
self.Tm_source = self.Tm_sources[0] if self.Tm_sources else None
self.Tb_sources = Tb_methods(CASRN=self.CAS)
self.Tb_source = self.Tb_sources[0] if self.Tb_sources else None
# Critical Point
self.Tc_methods = Tc_methods(self.CAS)
self.Tc_method = self.Tc_methods[0] if self.Tc_methods else None
self.Pc_methods = Pc_methods(self.CAS)
self.Pc_method = self.Pc_methods[0] if self.Pc_methods else None
self.Vc_methods = Vc_methods(self.CAS)
self.Vc_method = self.Vc_methods[0] if self.Vc_methods else None
self.omega_methods = omega_methods(self.CAS)
self.omega_method = self.omega_methods[0] if self.omega_methods else None
# Triple point
self.Tt_sources = Tt_methods(self.CAS)
self.Tt_source = self.Tt_sources[0] if self.Tt_sources else None
self.Pt_sources = Pt_methods(self.CAS)
self.Pt_source = self.Pt_sources[0] if self.Pt_sources else None
# Enthalpy
self.Hfus_methods = Hfus_methods(CASRN=self.CAS)
self.Hfus_method = self.Hfus_methods[0] if self.Hfus_methods else None
# Fire Safety Limits
self.Tflash_sources = T_flash_methods(self.CAS)
self.Tflash_source = self.Tflash_sources[0] if self.Tflash_sources else None
self.Tautoignition_sources = T_autoignition_methods(self.CAS)
self.Tautoignition_source = self.Tautoignition_sources[0] if self.Tautoignition_sources else None
# Chemical Exposure Limits
self.TWA_sources = TWA_methods(self.CAS)
self.TWA_source = self.TWA_sources[0] if self.TWA_sources else None
self.STEL_sources = STEL_methods(self.CAS)
self.STEL_source = self.STEL_sources[0] if self.STEL_sources else None
self.Ceiling_sources = Ceiling_methods(self.CAS)
self.Ceiling_source = self.Ceiling_sources[0] if self.Ceiling_sources else None
self.Skin_sources = Skin_methods(self.CAS)
self.Skin_source = self.Skin_sources[0] if self.Skin_sources else None
# self.Carcinogen_sources = Carcinogen_methods(self.CAS)
# self.Carcinogen_source = self.Carcinogen_sources[0] if self.Carcinogen_sources else None
self.Hfg_sources = Hfg_methods(CASRN=self.CAS)
self.Hfg_source = self.Hfg_sources[0] if self.Hfg_sources else None
self.S0g_sources = S0g_methods(CASRN=self.CAS)
self.S0g_source = self.S0g_sources[0] if self.S0g_sources else None
# Misc
self.dipole_sources = dipole_moment_methods(CASRN=self.CAS)
self.dipole_source = self.dipole_sources[0] if self.dipole_sources else None
# Environmental
self.GWP_sources = GWP_methods(CASRN=self.CAS)
self.GWP_source = self.GWP_sources[0] if self.GWP_sources else None
self.ODP_sources = ODP_methods(CASRN=self.CAS)
self.ODP_source = self.ODP_sources[0] if self.ODP_sources else None
self.logP_sources = logP_methods(CASRN=self.CAS)
self.logP_source = self.logP_sources[0] if self.logP_sources else None
# Analytical
self.RI_sources = RI_methods(CASRN=self.CAS)
self.RI_source = self.RI_sources[0] if self.RI_sources else None
self.conductivity_sources = conductivity_methods(CASRN=self.CAS)
self.conductivity_source = self.conductivity_sources[0] if self.conductivity_sources else None
def set_constants(self):
self.Tm = Tm(self.CAS, method=self.Tm_source)
self.Tb = Tb(self.CAS, method=self.Tb_source)
# Critical Point
self.Tc = Tc(self.CAS, method=self.Tc_method)
self.Pc = Pc(self.CAS, method=self.Pc_method)
self.Vc = Vc(self.CAS, method=self.Vc_method)
self.omega = omega(self.CAS, method=self.omega_method)
self.Zc = Z(self.Tc, self.Pc, self.Vc) if all((self.Tc, self.Pc, self.Vc)) else None
self.rhoc = Vm_to_rho(self.Vc, self.MW) if self.Vc else None
self.rhocm = 1./self.Vc if self.Vc else None
# Triple point
self.Pt = Pt(self.CAS, method=self.Pt_source)
self.Tt = Tt(self.CAS, method=self.Tt_source)
# Enthalpy
self.Hfusm = Hfus(method=self.Hfus_method, CASRN=self.CAS)
self.Hfus = property_molar_to_mass(self.Hfusm, self.MW) if self.Hfusm is not None else None
# Chemical Exposure Limits
self.TWA = TWA(self.CAS, method=self.TWA_source)
self.STEL = STEL(self.CAS, method=self.STEL_source)
self.Ceiling = Ceiling(self.CAS, method=self.Ceiling_source)
self.Skin = Skin(self.CAS, method=self.Skin_source)
self.Carcinogen = Carcinogen(self.CAS)
# Misc
self.dipole = dipole(self.CAS, method=self.dipole_source) # Units of Debye
self.Stockmayer_sources = Stockmayer_methods(Tc=self.Tc, Zc=self.Zc, omega=self.omega, CASRN=self.CAS)
self.Stockmayer_source = self.Stockmayer_sources[0] if self.Stockmayer_sources else None
self.Stockmayer = Stockmayer(Tm=self.Tm, Tb=self.Tb, Tc=self.Tc, Zc=self.Zc, omega=self.omega, method=self.Stockmayer_source, CASRN=self.CAS)
# Environmental
self.GWP = GWP(CASRN=self.CAS, method=self.GWP_source)
self.ODP = ODP(CASRN=self.CAS, method=self.ODP_source)
self.logP = logP(CASRN=self.CAS, method=self.logP_source)
# Analytical
self.RI, self.RIT = RI(CASRN=self.CAS, method=self.RI_source)
self.conductivity, self.conductivityT = conductivity(CASRN=self.CAS, method=self.conductivity_source)
def set_eos(self, T, P, eos=PR):
try:
self.eos = eos(T=T, P=P, Tc=self.Tc, Pc=self.Pc, omega=self.omega)
except:
# Handle overflow errors and so on
self.eos = IG(T=T, P=P)
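    # Example (sketch): any cubic EOS from thermo.eos taking
    # (T, P, Tc, Pc, omega) can be swapped in place of the default PR,
    # e.g. SRK:
    #
    # >>> c = Chemical('hexane')              # doctest: +SKIP
    # >>> c.set_eos(T=c.T, P=c.P, eos=SRK)    # doctest: +SKIP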
@property
def eos(self):
r'''Equation of state object held by the chemical; used to calculate
excess thermodynamic quantities, and also provides a vapor pressure
curve, enthalpy of vaporization curve, fugacity, thermodynamic partial
derivatives, and more; see :obj:`thermo.eos` for a full listing.
Examples
--------
>>> Chemical('methane').eos.V_g
0.02441019502181826
'''
return self.eos_in_a_box[0]
@eos.setter
def eos(self, eos):
if self.eos_in_a_box:
self.eos_in_a_box.pop()
# Pass this mutable list to objects so if it is changed, it gets
# changed in the property method too
self.eos_in_a_box.append(eos)
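    # Design note: eos_in_a_box is the same one-element list handed to the
    # property correlations at construction, so reassigning `chem.eos` is
    # visible to e.g. VaporPressure's EOS-based method immediately, without
    # rebuilding those objects.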
def set_TP_sources(self):
        # Temperature and pressure dependence:
        # get and choose initial methods.
self.VaporPressure = VaporPressure(Tb=self.Tb, Tc=self.Tc, Pc=self.Pc,
omega=self.omega, CASRN=self.CAS,
eos=self.eos_in_a_box,
exp_poly_fit=get_chemical_constants(self.CAS, 'VaporPressure'))
self.Psat_298 = self.VaporPressure.T_dependent_property(298.15)
self.phase_STP = identify_phase(T=298.15, P=101325., Tm=self.Tm, Tb=self.Tb, Tc=self.Tc, Psat=self.Psat_298)
if self.Pt is None and self.Tt is not None:
self.Pt = self.VaporPressure(self.Tt)
self.Pt_source = 'VaporPressure'
# Chemistry
if self.phase_STP == 'g':
H_fun = Hfg
H_methods_fun = Hfg_methods
elif self.phase_STP == 'l':
H_fun = Hfl
H_methods_fun = Hfl_methods
elif self.phase_STP == 's':
H_fun = Hfs
H_methods_fun = Hfs_methods
else:
H_methods_fun = H_fun = None
if H_fun is not None:
self.Hf_sources = H_methods_fun(CASRN=self.CAS)
self.Hf_source = self.Hf_sources[0] if self.Hf_sources else None
self.Hfm = H_fun(CASRN=self.CAS, method=self.Hf_source)
else:
self.Hf_sources = []
self.Hf_source = self.Hfm = None
self.Hf = property_molar_to_mass(self.Hfm, self.MW) if (self.Hfm is not None) else None
self.combustion_stoichiometry = combustion_stoichiometry(self.atoms)
try:
self.Hcm = HHV_stoichiometry(self.combustion_stoichiometry, Hf=self.Hfm) if self.Hfm is not None else None
except:
self.Hcm = None
self.Hc = property_molar_to_mass(self.Hcm, self.MW) if (self.Hcm is not None) else None
self.Hcm_lower = LHV_from_HHV(self.Hcm, self.combustion_stoichiometry.get('H2O', 0.0)) if self.Hcm is not None else None
self.Hc_lower = property_molar_to_mass(self.Hcm_lower, self.MW) if (self.Hcm_lower is not None) else None
# Fire Safety Limits
self.Tflash = T_flash(self.CAS, method=self.Tflash_source)
self.Tautoignition = T_autoignition(self.CAS, method=self.Tautoignition_source)
self.LFL_sources = LFL_methods(atoms=self.atoms, Hc=self.Hcm, CASRN=self.CAS)
self.LFL_source = self.LFL_sources[0]
self.UFL_sources = UFL_methods(atoms=self.atoms, Hc=self.Hcm, CASRN=self.CAS)
self.UFL_source = self.UFL_sources[0]
try:
self.LFL = LFL(atoms=self.atoms, Hc=self.Hcm, CASRN=self.CAS, method=self.LFL_source)
except:
self.LFL = None
try:
self.UFL = UFL(atoms=self.atoms, Hc=self.Hcm, CASRN=self.CAS, method=self.UFL_source)
except:
self.UFL = None
self.Hfgm = Hfg(CASRN=self.CAS, method=self.Hfg_source)
self.Hfg = property_molar_to_mass(self.Hfgm, self.MW) if (self.Hfgm is not None) else None
self.S0gm = S0g(CASRN=self.CAS, method=self.S0g_source)
self.S0g = property_molar_to_mass(self.S0gm, self.MW) if (self.S0gm is not None) else None
# Calculated later
self.S0m = None
self.S0 = None
# Compute Gf and Gf(ig)
dHfs_std = []
S0_abs_elements = []
coeffs_elements = []
for atom, count in self.atoms.items():
try:
ele = periodic_table[atom]
H0, S0 = ele.Hf, ele.S0
if ele.number in homonuclear_elements:
H0, S0 = 0.5 * H0, 0.5 * S0
except KeyError:
H0, S0 = None, None # D, T
dHfs_std.append(H0)
S0_abs_elements.append(S0)
coeffs_elements.append(count)
self.elemental_reaction_data = (dHfs_std, S0_abs_elements, coeffs_elements)
try:
self.Gfgm = Gibbs_formation(self.Hfgm, self.S0gm, dHfs_std, S0_abs_elements, coeffs_elements)
except:
self.Gfgm = None
self.Gfg = property_molar_to_mass(self.Gfgm, self.MW) if (self.Gfgm is not None) else None
# Compute Entropy of formation
self.Sfgm = (self.Hfgm - self.Gfgm)/298.15 if (self.Hfgm is not None and self.Gfgm is not None) else None # hardcoded
self.Sfg = property_molar_to_mass(self.Sfgm, self.MW) if (self.Sfgm is not None) else None
try:
self.Hcgm = HHV_stoichiometry(self.combustion_stoichiometry, Hf=self.Hfgm) if self.Hfgm is not None else None
except:
self.Hcgm = None
self.Hcg = property_molar_to_mass(self.Hcgm, self.MW) if (self.Hcgm is not None) else None
self.Hcgm_lower = LHV_from_HHV(self.Hcgm, self.combustion_stoichiometry.get('H2O', 0.0)) if self.Hcgm is not None else None
self.Hcg_lower = property_molar_to_mass(self.Hcgm_lower, self.MW) if (self.Hcgm_lower is not None) else None
try:
self.StielPolar = Stiel_polar_factor(Psat=self.VaporPressure(T=self.Tc*0.6), Pc=self.Pc, omega=self.omega)
except:
self.StielPolar = None
self.VolumeLiquid = VolumeLiquid(MW=self.MW, Tb=self.Tb, Tc=self.Tc,
Pc=self.Pc, Vc=self.Vc, Zc=self.Zc, omega=self.omega,
dipole=self.dipole,
Psat=self.VaporPressure,
poly_fit=get_chemical_constants(self.CAS, 'VolumeLiquid'),
eos=self.eos_in_a_box, CASRN=self.CAS)
self.Vml_Tb = self.VolumeLiquid.T_dependent_property(self.Tb) if self.Tb else None
self.Vml_Tm = self.VolumeLiquid.T_dependent_property(self.Tm) if self.Tm else None
self.Vml_STP = self.VolumeLiquid.T_dependent_property(298.15)
self.rhoml_STP = 1.0/self.Vml_STP if self.Vml_STP else None
self.rhol_STP = Vm_to_rho(self.Vml_STP, self.MW) if self.Vml_STP else None
        self.Vml_60F = self.VolumeLiquid.T_dependent_property(288.7055555555555)  # 60 °F in K
self.rhoml_60F = 1.0/self.Vml_60F if self.Vml_60F else None
self.rhol_60F = Vm_to_rho(self.Vml_60F, self.MW) if self.Vml_60F else None
self.VolumeGas = VolumeGas(MW=self.MW, Tc=self.Tc, Pc=self.Pc,
omega=self.omega, dipole=self.dipole,
eos=self.eos_in_a_box, CASRN=self.CAS)
self.Vmg_STP = ideal_gas(T=298.15, P=101325)
self.VolumeSolid = VolumeSolid(CASRN=self.CAS, MW=self.MW, Tt=self.Tt, Vml_Tt=self.Vml_Tm,
poly_fit=get_chemical_constants(self.CAS, 'VolumeSolid'))
self.Vms_Tm = self.VolumeSolid.T_dependent_property(self.Tm) if self.Tm else None
self.rhoms_Tm = 1.0/self.Vms_Tm if self.Vms_Tm is not None else None
self.rhos_Tm = Vm_to_rho(self.Vms_Tm, self.MW) if self.Vms_Tm else None
self.HeatCapacityGas = HeatCapacityGas(CASRN=self.CAS, MW=self.MW, similarity_variable=self.similarity_variable, poly_fit=get_chemical_constants(self.CAS, 'HeatCapacityGas'))
self.HeatCapacitySolid = HeatCapacitySolid(MW=self.MW, similarity_variable=self.similarity_variable, CASRN=self.CAS, poly_fit=get_chemical_constants(self.CAS, 'HeatCapacitySolid'))
self.HeatCapacityLiquid = HeatCapacityLiquid(CASRN=self.CAS, MW=self.MW, similarity_variable=self.similarity_variable, Tc=self.Tc, omega=self.omega, Cpgm=self.HeatCapacityGas.T_dependent_property, poly_fit=get_chemical_constants(self.CAS, 'HeatCapacityLiquid'))
self.EnthalpyVaporization = EnthalpyVaporization(CASRN=self.CAS, Tb=self.Tb, Tc=self.Tc, Pc=self.Pc, omega=self.omega,
similarity_variable=self.similarity_variable,
poly_fit_ln_tau=get_chemical_constants(self.CAS, 'EnthalpyVaporization'))
self.Hvap_Tbm = self.EnthalpyVaporization.T_dependent_property(self.Tb) if self.Tb else None
        self.Hvap_Tb = property_molar_to_mass(self.Hvap_Tbm, self.MW) if self.Hvap_Tbm is not None else None
        self.Svap_Tbm = self.Hvap_Tbm/self.Tb if (self.Tb is not None and self.Hvap_Tbm is not None) else None
self.Hvapm_298 = self.EnthalpyVaporization.T_dependent_property(298.15)
self.Hvap_298 = property_molar_to_mass(self.Hvapm_298, self.MW) if self.Hvapm_298 else None
self.EnthalpySublimation = EnthalpySublimation(CASRN=self.CAS, Tm=self.Tm, Tt=self.Tt,
Cpg=self.HeatCapacityGas, Cps=self.HeatCapacitySolid,
Hvap=self.EnthalpyVaporization,
poly_fit=get_chemical_constants(self.CAS, 'EnthalpySublimation'))
self.Hsubm = self.Hsub_Ttm = self.EnthalpySublimation(self.Tt) if self.Tt is not None else None
self.Hsub = self.Hsub_Tt = property_molar_to_mass(self.Hsub_Ttm, self.MW) if self.Hsub_Ttm is not None else None
self.Ssub_Ttm = self.Hsub_Ttm/self.Tt if (self.Tt is not None and self.Hsub_Ttm is not None) else None
self.Sfusm = self.Hfusm/self.Tm if (self.Tm is not None and self.Hfusm is not None) else None
self.SublimationPressure = SublimationPressure(CASRN=self.CAS, Tt=self.Tt, Pt=self.Pt, Hsub_t=self.Hsub_Ttm,
exp_poly_fit=get_chemical_constants(self.CAS, 'SublimationPressure'))
self.ViscosityLiquid = ViscosityLiquid(CASRN=self.CAS, MW=self.MW, Tm=self.Tm, Tc=self.Tc, Pc=self.Pc, Vc=self.Vc, omega=self.omega, Psat=self.VaporPressure, Vml=self.VolumeLiquid,
exp_poly_fit=get_chemical_constants(self.CAS, 'ViscosityLiquid'))
Vmg_atm_T_dependent = lambda T : self.VolumeGas.TP_dependent_property(T, 101325)
self.ViscosityGas = ViscosityGas(CASRN=self.CAS, MW=self.MW, Tc=self.Tc, Pc=self.Pc, Zc=self.Zc, dipole=self.dipole, Vmg=Vmg_atm_T_dependent,
poly_fit=get_chemical_constants(self.CAS, 'ViscosityGas'))
self.ThermalConductivityLiquid = ThermalConductivityLiquid(CASRN=self.CAS, MW=self.MW, Tm=self.Tm, Tb=self.Tb, Tc=self.Tc, Pc=self.Pc, omega=self.omega, Hfus=self.Hfusm,
poly_fit=get_chemical_constants(self.CAS, 'ThermalConductivityLiquid'))
self.ThermalConductivityGas = ThermalConductivityGas(CASRN=self.CAS, MW=self.MW, Tb=self.Tb, Tc=self.Tc, Pc=self.Pc, Vc=self.Vc, Zc=self.Zc, omega=self.omega, dipole=self.dipole, Vmg=self.VolumeGas, Cpgm=self.HeatCapacityGas,
mug=self.ViscosityGas,
poly_fit=get_chemical_constants(self.CAS, 'ThermalConductivityGas'))
self.SurfaceTension = SurfaceTension(CASRN=self.CAS, MW=self.MW, Tb=self.Tb, Tc=self.Tc, Pc=self.Pc, Vc=self.Vc, Zc=self.Zc, omega=self.omega, StielPolar=self.StielPolar, Hvap_Tb=self.Hvap_Tb, Vml=self.VolumeLiquid, Cpl=self.HeatCapacityLiquid,
exp_poly_fit_ln_tau=get_chemical_constants(self.CAS, 'SurfaceTension'))
self.Permittivity = self.PermittivityLiquid = PermittivityLiquid(CASRN=self.CAS, poly_fit=get_chemical_constants(self.CAS, 'PermittivityLiquid'))
# set molecular_diameter; depends on Vml_Tb, Vml_Tm
self.molecular_diameter_sources = molecular_diameter_methods(Tc=self.Tc, Pc=self.Pc, Vc=self.Vc, Zc=self.Zc, omega=self.omega, Vm=self.Vml_Tm, Vb=self.Vml_Tb, CASRN=self.CAS)
self.molecular_diameter_source = self.molecular_diameter_sources[0] if self.molecular_diameter_sources else None
self.molecular_diameter = molecular_diameter(Tc=self.Tc, Pc=self.Pc, Vc=self.Vc, Zc=self.Zc, omega=self.omega, Vm=self.Vml_Tm, Vb=self.Vml_Tb, method=self.molecular_diameter_source, CASRN=self.CAS)
# Adjust Gf, Hf if needed
try:
if self.Hfgm is not None and self.Hfm is None:
Hfm = None
if self.phase_STP == 'l' and self.Hvapm_298 is not None:
Hfm = Hf_basis_converter(Hvapm=self.Hvapm_298, Hf_gas=self.Hfgm)
elif self.phase_STP == 'g':
Hfm = self.Hfgm
if Hfm is not None:
self.Hfm = Hfm
self.Hf = property_molar_to_mass(self.Hfm, self.MW) if (self.Hfm is not None) else None
elif self.Hfm is not None and self.Hfgm is None:
Hfmg = None
if self.phase_STP == 'l' and self.Hvapm_298 is not None:
Hfmg = Hf_basis_converter(Hvapm=self.Hvapm_298, Hf_liq=self.Hfm)
elif self.phase_STP == 'g':
Hfmg = self.Hfm
if Hfmg is not None:
self.Hfmg = Hfmg
self.Hfg = property_molar_to_mass(self.Hfmg, self.MW) if (self.Hfmg is not None) else None
except:
pass
try:
from thermo.chemical_utils import S0_basis_converter
if self.S0gm is not None and self.S0m is None:
S0m = None
if self.phase_STP == 'l':
S0m = S0_basis_converter(self, S0_gas=self.S0gm)
elif self.phase_STP == 'g':
S0m = self.S0gm
if S0m is not None:
self.S0m = S0m
self.S0 = property_molar_to_mass(self.S0m, self.MW) if (self.S0m is not None) else None
elif self.S0m is not None and self.S0gm is None:
S0gm = None
if self.phase_STP == 'l':
S0gm = S0_basis_converter(self, S0_liq=self.S0m)
elif self.phase_STP == 'g':
S0gm = self.S0m
if S0gm is not None:
self.S0gm = S0gm
self.S0g = property_molar_to_mass(self.S0gm, self.MW) if (self.S0gm is not None) else None
except:
pass
try:
self.Gfm = Gibbs_formation(self.Hfm, self.S0m, *self.elemental_reaction_data)
except:
self.Gfm = None
self.Gf = property_molar_to_mass(self.Gfm, self.MW) if (self.Gfm is not None) else None
self.Sfm = (self.Hfm - self.Gfm)/298.15 if (self.Hfm is not None and self.Gfm is not None) else None
self.Sf = property_molar_to_mass(self.Sfm, self.MW) if (self.Sfm is not None) else None
self.solubility_parameter_STP = solubility_parameter(T=298.15, Hvapm=self.Hvapm_298, Vml=self.Vml_STP) if (self.Hvapm_298 is not None and self.Vml_STP is not None) else None
def set_ref(self, T_ref=298.15, P_ref=101325, phase_ref='calc', H_ref=0, S_ref=0):
        # Must run after set_TP_sources and set_phase, due to the HeatCapacity* objects and phase_STP
self.T_ref = getattr(self, T_ref) if isinstance(T_ref, str) else T_ref
self.P_ref = getattr(self, P_ref) if isinstance(P_ref, str) else P_ref
self.H_ref = getattr(self, H_ref) if isinstance(H_ref, str) else H_ref
self.S_ref = getattr(self, S_ref) if isinstance(S_ref, str) else S_ref
self.phase_ref = self.phase_STP if phase_ref == 'calc' else phase_ref
integrators = {'s': self.HeatCapacitySolid.T_dependent_property_integral,
'l': self.HeatCapacityLiquid.T_dependent_property_integral,
'g': self.HeatCapacityGas.T_dependent_property_integral}
integrators_T = {'s': self.HeatCapacitySolid.T_dependent_property_integral_over_T,
'l': self.HeatCapacityLiquid.T_dependent_property_integral_over_T,
'g': self.HeatCapacityGas.T_dependent_property_integral_over_T}
# Integrals stored to avoid recalculation, all from T_low to T_high
try:
# Enthalpy integrals
if self.phase_ref != 'l' and self.Tm and self.Tb:
self.H_int_l_Tm_to_Tb = integrators['l'](self.Tm, self.Tb)
if self.phase_ref == 's' and self.Tm:
self.H_int_T_ref_s_to_Tm = integrators['s'](self.T_ref, self.Tm)
if self.phase_ref == 'g' and self.Tb:
self.H_int_Tb_to_T_ref_g = integrators['g'](self.Tb, self.T_ref)
if self.phase_ref == 'l' and self.Tm and self.Tb:
self.H_int_l_T_ref_l_to_Tb = integrators['l'](self.T_ref, self.Tb)
self.H_int_l_Tm_to_T_ref_l = integrators['l'](self.Tm, self.T_ref)
# Entropy integrals
if self.phase_ref != 'l' and self.Tm and self.Tb:
self.S_int_l_Tm_to_Tb = integrators_T['l'](self.Tm, self.Tb)
if self.phase_ref == 's' and self.Tm:
self.S_int_T_ref_s_to_Tm = integrators_T['s'](self.T_ref, self.Tm)
if self.phase_ref == 'g' and self.Tb:
self.S_int_Tb_to_T_ref_g = integrators_T['g'](self.Tb, self.T_ref)
if self.phase_ref == 'l' and self.Tm and self.Tb:
self.S_int_l_T_ref_l_to_Tb = integrators_T['l'](self.T_ref, self.Tb)
self.S_int_l_Tm_to_T_ref_l = integrators_T['l'](self.Tm, self.T_ref)
except:
pass
# Excess properties stored
try:
if self.phase_ref == 'g':
self.eos_phase_ref = self.eos.to_TP(self.T_ref, self.P_ref)
self.H_dep_ref_g = self.eos_phase_ref.H_dep_g
self.S_dep_ref_g = self.eos_phase_ref.S_dep_g
elif self.phase_ref == 'l':
self.eos_phase_ref = self.eos.to_TP(self.T_ref, self.P_ref)
self.H_dep_ref_l = self.eos_phase_ref.H_dep_l
self.S_dep_ref_l = self.eos_phase_ref.S_dep_l
self.H_dep_T_ref_Pb = self.eos.to_TP(self.T_ref, 101325).H_dep_l
self.S_dep_T_ref_Pb = self.eos.to_TP(self.T_ref, 101325).S_dep_l
if self.Tb:
self.eos_Tb = self.eos.to_TP(self.Tb, 101325)
self.H_dep_Tb_Pb_g = self.eos_Tb.H_dep_g
self.H_dep_Tb_Pb_l = self.eos_Tb.H_dep_l
self.H_dep_Tb_P_ref_g = self.eos.to_TP(self.Tb, self.P_ref).H_dep_g
self.S_dep_Tb_P_ref_g = self.eos.to_TP(self.Tb, self.P_ref).S_dep_g
self.S_dep_Tb_Pb_g = self.eos_Tb.S_dep_g
self.S_dep_Tb_Pb_l = self.eos_Tb.S_dep_l
# if self.Tt and self.Pt:
# self.eos_Tt = self.eos.to_TP(self.Tt, self.Pt)
# self.H_dep_Tt_g = self.eos_Tt.H_dep_g
## self.H_dep_Tt_l = self.eos_Tt.H_dep_l
#
# self.S_dep_Tt_g = self.eos_Tt.S_dep_g
## self.S_dep_Tt_l = self.eos_Tt.S_dep_l
except:
pass
def calc_H(self, T, P):
integrators = {'s': self.HeatCapacitySolid.T_dependent_property_integral,
'l': self.HeatCapacityLiquid.T_dependent_property_integral,
'g': self.HeatCapacityGas.T_dependent_property_integral}
try:
H = self.H_ref
if self.phase == self.phase_ref:
H += integrators[self.phase](self.T_ref, T)
elif self.phase_ref == 's' and self.phase == 'l':
H += self.H_int_T_ref_s_to_Tm + self.Hfusm + integrators['l'](self.Tm, T)
elif self.phase_ref == 'l' and self.phase == 's':
H += -self.H_int_l_Tm_to_T_ref_l - self.Hfusm + integrators['s'](self.Tm, T)
elif self.phase_ref == 'l' and self.phase == 'g':
H += self.H_int_l_T_ref_l_to_Tb + self.Hvap_Tbm + integrators['g'](self.Tb, T)
elif self.phase_ref == 'g' and self.phase == 'l':
H += -self.H_int_Tb_to_T_ref_g - self.Hvap_Tbm + integrators['l'](self.Tb, T)
elif self.phase_ref == 's' and self.phase == 'g':
H += self.H_int_T_ref_s_to_Tm + self.Hfusm + self.H_int_l_Tm_to_Tb + self.Hvap_Tbm + integrators['g'](self.Tb, T)
elif self.phase_ref == 'g' and self.phase == 's':
H += -self.H_int_Tb_to_T_ref_g - self.Hvap_Tbm - self.H_int_l_Tm_to_Tb - self.Hfusm + integrators['s'](self.Tm, T)
else:
raise Exception('Unknown error')
except:
return None
return H
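    # Sketch of the path calc_H walks for phase_ref='l' and phase='g':
    #   H = H_ref + integral(Cp_l, T_ref..Tb) + Hvap(Tb) + integral(Cp_g, Tb..T)
    # i.e. heat the liquid to the normal boiling point, vaporize, then heat
    # the gas to T; the other branches chain the same pieces through the
    # melting point and/or in reverse.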
def calc_H_excess(self, T, P):
H_dep = 0
if self.phase_ref == 'g' and self.phase == 'g':
H_dep += self.eos.to_TP(T, P).H_dep_g - self.H_dep_ref_g
elif self.phase_ref == 'l' and self.phase == 'l':
try:
H_dep += self.eos.to_TP(T, P).H_dep_l - self._eos_T_101325.H_dep_l
except:
H_dep += 0
elif self.phase_ref == 'g' and self.phase == 'l':
H_dep += self.H_dep_Tb_Pb_g - self.H_dep_Tb_P_ref_g
H_dep += (self.eos.to_TP(T, P).H_dep_l - self._eos_T_101325.H_dep_l)
elif self.phase_ref == 'l' and self.phase == 'g':
H_dep += self.H_dep_T_ref_Pb - self.H_dep_ref_l
H_dep += (self.eos.to_TP(T, P).H_dep_g - self.H_dep_Tb_Pb_g)
return H_dep
def calc_S_excess(self, T, P):
S_dep = 0
if self.phase_ref == 'g' and self.phase == 'g':
S_dep += self.eos.to_TP(T, P).S_dep_g - self.S_dep_ref_g
elif self.phase_ref == 'l' and self.phase == 'l':
try:
S_dep += self.eos.to_TP(T, P).S_dep_l - self._eos_T_101325.S_dep_l
except:
S_dep += 0
elif self.phase_ref == 'g' and self.phase == 'l':
S_dep += self.S_dep_Tb_Pb_g - self.S_dep_Tb_P_ref_g
S_dep += (self.eos.to_TP(T, P).S_dep_l - self._eos_T_101325.S_dep_l)
elif self.phase_ref == 'l' and self.phase == 'g':
S_dep += self.S_dep_T_ref_Pb - self.S_dep_ref_l
S_dep += (self.eos.to_TP(T, P).S_dep_g - self.S_dep_Tb_Pb_g)
return S_dep
def calc_S(self, T, P):
integrators_T = {'s': self.HeatCapacitySolid.T_dependent_property_integral_over_T,
'l': self.HeatCapacityLiquid.T_dependent_property_integral_over_T,
'g': self.HeatCapacityGas.T_dependent_property_integral_over_T}
try:
S = self.S_ref
if self.phase == self.phase_ref:
S += integrators_T[self.phase](self.T_ref, T)
if self.phase in ['l', 'g']:
S += -R*log(P/self.P_ref)
elif self.phase_ref == 's' and self.phase == 'l':
S += self.S_int_T_ref_s_to_Tm + self.Hfusm/self.Tm + integrators_T['l'](self.Tm, T)
elif self.phase_ref == 'l' and self.phase == 's':
S += - self.S_int_l_Tm_to_T_ref_l - self.Hfusm/self.Tm + integrators_T['s'](self.Tm, T)
elif self.phase_ref == 'l' and self.phase == 'g':
S += self.S_int_l_T_ref_l_to_Tb + self.Hvap_Tbm/self.Tb + integrators_T['g'](self.Tb, T) -R*log(P/self.P_ref) # TODO add to other states
elif self.phase_ref == 'g' and self.phase == 'l':
                S += - self.S_int_Tb_to_T_ref_g - self.Hvap_Tbm/self.Tb + integrators_T['l'](self.Tb, T)
elif self.phase_ref == 's' and self.phase == 'g':
S += self.S_int_T_ref_s_to_Tm + self.Hfusm/self.Tm + self.S_int_l_Tm_to_Tb + self.Hvap_Tbm/self.Tb + integrators_T['g'](self.Tb, T)
elif self.phase_ref == 'g' and self.phase == 's':
S += - self.S_int_Tb_to_T_ref_g - self.Hvap_Tbm/self.Tb - self.S_int_l_Tm_to_Tb - self.Hfusm/self.Tm + integrators_T['s'](self.Tm, T)
else:
raise Exception('Unknown error')
except:
return None
return S
def calculate_TH(self, T, H):
def to_solve(P):
self.calculate(T, P)
return self.H - H
return newton(to_solve, self.P)
def calculate_PH(self, P, H):
def to_solve(T):
self.calculate(T, P)
return self.H - H
return newton(to_solve, self.T)
def calculate_TS(self, T, S):
def to_solve(P):
self.calculate(T, P)
return self.S - S
return newton(to_solve, self.P)
def calculate_PS(self, P, S):
def to_solve(T):
self.calculate(T, P)
return self.S - S
return newton(to_solve, self.T)
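    # Example (sketch): each solver above inverts calculate() with a Newton
    # iteration started from the current state, so the Chemical should
    # already be at nearby conditions for reliable convergence.
    # Illustrative usage:
    #
    # >>> w = Chemical('water')                      # doctest: +SKIP
    # >>> w.calculate_PH(P=101325, H=w.H + 1000.0)   # doctest: +SKIP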
def set_thermo(self):
try:
self._eos_T_101325 = self.eos.to_TP(self.T, 101325)
self.Hm = self.calc_H(self.T, self.P)
self.Hm += self.calc_H_excess(self.T, self.P)
self.H = property_molar_to_mass(self.Hm, self.MW) if (self.Hm is not None) else None
self.Sm = self.calc_S(self.T, self.P)
self.Sm += self.calc_S_excess(self.T, self.P)
self.S = property_molar_to_mass(self.Sm, self.MW) if (self.Sm is not None) else None
self.G = self.H - self.T*self.S if (self.H is not None and self.S is not None) else None
self.Gm = self.Hm - self.T*self.Sm if (self.Hm is not None and self.Sm is not None) else None
except:
pass
@property
def Um(self):
r'''Internal energy of the chemical at its current temperature and
pressure, in units of [J/mol].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return self.Hm - self.P*self.Vm if (self.Vm and self.Hm is not None) else None
@property
def U(self):
r'''Internal energy of the chemical at its current temperature and
pressure, in units of [J/kg].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return property_molar_to_mass(self.Um, self.MW) if (self.Um is not None) else None
@property
def Am(self):
r'''Helmholtz energy of the chemical at its current temperature and
pressure, in units of [J/mol].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return self.Um - self.T*self.Sm if (self.Um is not None and self.Sm is not None) else None
@property
def A(self):
r'''Helmholtz energy of the chemical at its current temperature and
pressure, in units of [J/kg].
This property requires that :obj:`thermo.chemical.set_thermo` ran
successfully to be accurate.
It also depends on the molar volume of the chemical at its current
conditions.
'''
return self.U - self.T*self.S if (self.U is not None and self.S is not None) else None
### Temperature independent properties - calculate lazily
@property
def charge(self):
        r'''Charge of a chemical, computed with RDKit from a chemical's
        SMILES. If RDKit is not available, the charge is computed from the
        formula instead.
Examples
--------
>>> Chemical('sodium ion').charge
1
'''
try:
if not self.rdkitmol:
return charge_from_formula(self.formula)
else:
from rdkit import Chem
return Chem.GetFormalCharge(self.rdkitmol)
except:
return charge_from_formula(self.formula)
@property
def rings(self):
r'''Number of rings in a chemical, computed with RDKit from a
chemical's SMILES. If RDKit is not available, holds None.
Examples
--------
>>> Chemical('Paclitaxel').rings
7
'''
try:
from rdkit.Chem import Descriptors
return Descriptors.RingCount(self.rdkitmol)
except:
return None
@property
def aromatic_rings(self):
r'''Number of aromatic rings in a chemical, computed with RDKit from a
chemical's SMILES. If RDKit is not available, holds None.
Examples
--------
>>> Chemical('Paclitaxel').aromatic_rings
3
'''
try:
from rdkit.Chem import Descriptors
return Descriptors.NumAromaticRings(self.rdkitmol)
except:
return None
@property
def rdkitmol(self):
r'''RDKit object of the chemical, without hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
'''
if self.__rdkitmol:
return self.__rdkitmol
else:
try:
from rdkit import Chem
self.__rdkitmol = Chem.MolFromSmiles(self.smiles)
return self.__rdkitmol
except:
return None
@property
def rdkitmol_Hs(self):
r'''RDKit object of the chemical, with hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
'''
if self.__rdkitmol_Hs:
return self.__rdkitmol_Hs
else:
try:
from rdkit import Chem
self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol)
return self.__rdkitmol_Hs
except:
return None
@property
def Hill(self):
r'''Hill formula of a compound. For a description of the Hill system,
see :obj:`chemicals.elements.atoms_to_Hill`.
Examples
--------
>>> Chemical('furfuryl alcohol').Hill
'C5H6O2'
'''
if self.__Hill:
return self.__Hill
else:
self.__Hill = atoms_to_Hill(self.atoms)
return self.__Hill
@property
def atom_fractions(self):
        r'''Dictionary of atom: fractional occurrence of the elements in a
        chemical. Useful when performing element balances. For mass-fraction
        occurrences, see :obj:`mass_fractions`.
Examples
--------
>>> Chemical('Ammonium aluminium sulfate').atom_fractions
{'H': 0.25, 'S': 0.125, 'Al': 0.0625, 'O': 0.5, 'N': 0.0625}
'''
if self.__atom_fractions:
return self.__atom_fractions
else:
self.__atom_fractions = atom_fractions(self.atoms)
return self.__atom_fractions
@property
def mass_fractions(self):
        r'''Dictionary of atom: mass-weighted fractional occurrence of
        elements. Useful when performing mass balances. For atom-fraction
        occurrences, see :obj:`atom_fractions`.
Examples
--------
>>> Chemical('water').mass_fractions
{'H': 0.11189834407236524, 'O': 0.8881016559276347}
'''
if self.__mass_fractions:
return self.__mass_fractions
else:
self.__mass_fractions = mass_fractions(self.atoms, self.MW)
return self.__mass_fractions
@property
def legal_status(self):
r'''Dictionary of legal status indicators for the chemical.
Examples
--------
>>> Chemical('benzene').legal_status
{'DSL': 'LISTED', 'EINECS': 'LISTED', 'NLP': 'UNLISTED', 'SPIN': 'LISTED', 'TSCA': 'LISTED'}
'''
if self.__legal_status:
return self.__legal_status
else:
self.__legal_status = legal_status(self.CAS, method='COMBINED')
return self.__legal_status
@property
def economic_status(self):
r'''Dictionary of economic status indicators for the chemical.
Examples
--------
>>> Chemical('benzene').economic_status
["US public: {'Manufactured': 6165232.1, 'Imported': 463146.474, 'Exported': 271908.252}",
u'1,000,000 - 10,000,000 tonnes per annum',
u'Intermediate Use Only',
'OECD HPV Chemicals']
'''
if self.__economic_status:
return self.__economic_status
else:
self.__economic_status = economic_status(self.CAS, method='Combined')
return self.__economic_status
@property
def UNIFAC_groups(self):
r'''Dictionary of UNIFAC subgroup: count groups for the original
UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> Chemical('Cumene').UNIFAC_groups
{1: 2, 9: 5, 13: 1}
'''
if self.__UNIFAC_groups:
return self.__UNIFAC_groups
else:
load_group_assignments_DDBST()
if self.InChI_Key in DDBST_UNIFAC_assignments:
self.__UNIFAC_groups = DDBST_UNIFAC_assignments[self.InChI_Key]
return self.__UNIFAC_groups
else:
return None
@property
def UNIFAC_Dortmund_groups(self):
r'''Dictionary of Dortmund UNIFAC subgroup: count groups for the
Dortmund UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> Chemical('Cumene').UNIFAC_Dortmund_groups
{1: 2, 9: 5, 13: 1}
'''
if self.__UNIFAC_Dortmund_groups:
return self.__UNIFAC_Dortmund_groups
else:
load_group_assignments_DDBST()
if self.InChI_Key in DDBST_MODIFIED_UNIFAC_assignments:
self.__UNIFAC_Dortmund_groups = DDBST_MODIFIED_UNIFAC_assignments[self.InChI_Key]
return self.__UNIFAC_Dortmund_groups
else:
return None
@property
def PSRK_groups(self):
r'''Dictionary of PSRK subgroup: count groups for the PSRK subgroups,
as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> Chemical('Cumene').PSRK_groups
{1: 2, 9: 5, 13: 1}
'''
if self.__PSRK_groups:
return self.__PSRK_groups
else:
load_group_assignments_DDBST()
if self.InChI_Key in DDBST_PSRK_assignments:
self.__PSRK_groups = DDBST_PSRK_assignments[self.InChI_Key]
return self.__PSRK_groups
else:
return None
@property
def UNIFAC_R(self):
r'''UNIFAC `R` (normalized Van der Waals volume), dimensionless.
Used in the UNIFAC model.
Examples
--------
>>> Chemical('benzene').UNIFAC_R
3.1878
'''
if self.UNIFAC_groups:
return UNIFAC_RQ(self.UNIFAC_groups)[0]
return None
@property
def UNIFAC_Q(self):
r'''UNIFAC `Q` (normalized Van der Waals area), dimensionless.
Used in the UNIFAC model.
Examples
--------
>>> Chemical('decane').UNIFAC_Q
6.016
'''
if self.UNIFAC_groups:
return UNIFAC_RQ(self.UNIFAC_groups)[1]
return None
@property
def Van_der_Waals_volume(self):
r'''Unnormalized Van der Waals volume, in units of [m^3/mol].
Examples
--------
>>> Chemical('hexane').Van_der_Waals_volume
6.8261966e-05
'''
if self.UNIFAC_R:
return Van_der_Waals_volume(self.UNIFAC_R)
return None
@property
def Van_der_Waals_area(self):
r'''Unnormalized Van der Waals area, in units of [m^2/mol].
Examples
--------
>>> Chemical('hexane').Van_der_Waals_area
964000.0
'''
if self.UNIFAC_Q:
return Van_der_Waals_area(self.UNIFAC_Q)
return None
@property
def R_specific(self):
r'''Specific gas constant, in units of [J/kg/K].
Examples
--------
>>> Chemical('water').R_specific
461.52265188218
'''
return property_molar_to_mass(R, self.MW)
### One phase properties - calculate lazily
@property
def Psat(self):
r'''Vapor pressure of the chemical at its current temperature, in units
of [Pa]. For calculation of this property at other temperatures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface :obj:`thermo.vapor_pressure.VaporPressure`;
each Chemical instance creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water', T=320).Psat
10533.614271198725
>>> Chemical('water').VaporPressure.T_dependent_property(320)
10533.614271198725
>>> Chemical('water').VaporPressure.all_methods
set(['VDI_PPDS', 'BOILING_CRITICAL', 'WAGNER_MCGARRY', 'AMBROSE_WALTON', 'COOLPROP', 'LEE_KESLER_PSAT', 'EOS', 'ANTOINE_POLING', 'SANJARI', 'DIPPR_PERRY_8E', 'Edalat'])
'''
return self.VaporPressure(self.T)
@property
def Hvapm(self):
r'''Enthalpy of vaporization of the chemical at its current temperature,
in units of [J/mol]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.phase_change.EnthalpyVaporization`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water', T=320).Hvapm
43048.23612280223
>>> Chemical('water').EnthalpyVaporization.T_dependent_property(320)
43048.23612280223
>>> Chemical('water').EnthalpyVaporization.all_methods
set(['VDI_PPDS', 'MORGAN_KOBAYASHI', 'VETERE', 'VELASCO', 'LIU', 'COOLPROP', 'CRC_HVAP_298', 'CLAPEYRON', 'SIVARAMAN_MAGEE_KOBAYASHI', 'ALIBAKHSHI', 'DIPPR_PERRY_8E', 'RIEDEL', 'CHEN', 'PITZER', 'CRC_HVAP_TB'])
'''
return self.EnthalpyVaporization(self.T)
@property
def Hvap(self):
r'''Enthalpy of vaporization of the chemical at its current temperature,
in units of [J/kg].
This property uses the object-oriented interface
:obj:`thermo.phase_change.EnthalpyVaporization`, but converts its
results from molar to mass units.
Examples
--------
>>> Chemical('water', T=320).Hvap
2389540.219347256
'''
        Hvapm = self.Hvapm
        if Hvapm:
            return property_molar_to_mass(Hvapm, self.MW)
return None
@property
def Cpsm(self):
r'''Solid-phase heat capacity of the chemical at its current temperature,
in units of [J/mol/K]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacitySolid`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
>>> Chemical('palladium').Cpsm
24.930765664000003
>>> Chemical('palladium').HeatCapacitySolid.T_dependent_property(320)
25.098979200000002
>>> Chemical('palladium').HeatCapacitySolid.all_methods
set(["PERRY151", 'CRCSTD', 'LASTOVKA_S'])
'''
return self.HeatCapacitySolid(self.T)
@property
def Cplm(self):
r'''Liquid-phase heat capacity of the chemical at its current temperature,
in units of [J/mol/K]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacityLiquid`; each Chemical instance
creates one to actually perform the calculations.
Notes
-----
Some methods give heat capacity along the saturation line, some at
1 atm but only up to the normal boiling point, and some give heat
capacity at 1 atm up to the normal boiling point and then along the
saturation line. Real-liquid heat capacity is pressure dependent, but
this interface is not.
Examples
--------
>>> Chemical('water').Cplm
75.31462591538556
>>> Chemical('water').HeatCapacityLiquid.T_dependent_property(320)
75.2591744360631
>>> Chemical('water').HeatCapacityLiquid.T_dependent_property_integral(300, 320)
1505.0619005000553
'''
return self.HeatCapacityLiquid(self.T)
@property
def Cpgm(self):
r'''Gas-phase ideal gas heat capacity of the chemical at its current
temperature, in units of [J/mol/K]. For calculation of this property at
other temperatures, or specifying manually the method used to calculate
it, and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacityGas`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water').Cpgm
33.583577868850675
>>> Chemical('water').HeatCapacityGas.T_dependent_property(320)
33.67865044005934
>>> Chemical('water').HeatCapacityGas.T_dependent_property_integral(300, 320)
672.6480417835064
'''
return self.HeatCapacityGas(self.T)
@property
def Cps(self):
r'''Solid-phase heat capacity of the chemical at its current temperature,
in units of [J/kg/K]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacitySolid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('palladium', T=400).Cps
241.63563239992484
>>> Pd = Chemical('palladium', T=400)
>>> Cpsms = [Pd.HeatCapacitySolid.T_dependent_property(T) for T in np.linspace(300,500, 5)]
>>> [property_molar_to_mass(Cps, Pd.MW) for Cps in Cpsms]
[234.40150347679008, 238.01856793835751, 241.63563239992484, 245.25269686149224, 248.86976132305958]
'''
Cpsm = self.HeatCapacitySolid(self.T)
if Cpsm:
return property_molar_to_mass(Cpsm, self.MW)
return None
@property
def Cpl(self):
r'''Liquid-phase heat capacity of the chemical at its current temperature,
in units of [J/kg/K]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacityLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('water', T=320).Cpl
4177.518996988284
Ideal entropy change of water from 280 K to 340 K, output converted
back to mass-based units of J/kg/K.
>>> dSm = Chemical('water').HeatCapacityLiquid.T_dependent_property_integral_over_T(280, 340)
>>> property_molar_to_mass(dSm, Chemical('water').MW)
812.1024585274956
'''
Cplm = self.HeatCapacityLiquid(self.T)
if Cplm:
return property_molar_to_mass(Cplm, self.MW)
return None
@property
def Cpg(self):
r'''Gas-phase heat capacity of the chemical at its current temperature,
in units of [J/kg/K]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacityGas`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> w = Chemical('water', T=520)
>>> w.Cpg
1967.6698314620658
'''
Cpgm = self.HeatCapacityGas(self.T)
if Cpgm:
return property_molar_to_mass(Cpgm, self.MW)
return None
@property
def Cvgm(self):
r'''Gas-phase ideal-gas constant-volume heat capacity of the chemical at
its current temperature, in units of [J/mol/K]. Subtracts R from
the ideal-gas heat capacity; does not include pressure-compensation
from an equation of state.
Examples
--------
>>> w = Chemical('water', T=520)
>>> w.Cvgm
27.13366316134193
'''
Cpgm = self.HeatCapacityGas(self.T)
if Cpgm:
return Cpgm - R
return None
@property
def Cvg(self):
r'''Gas-phase ideal-gas constant-volume heat capacity of the chemical at
its current temperature, in units of [J/kg/K]. Subtracts R from
the ideal-gas heat capacity; does not include pressure-compensation
from an equation of state.
Examples
--------
>>> w = Chemical('water', T=520)
>>> w.Cvg
1506.1471795798861
'''
Cvgm = self.Cvgm
if Cvgm:
return property_molar_to_mass(Cvgm, self.MW)
return None
@property
def isentropic_exponent(self):
r'''Gas-phase ideal-gas isentropic exponent of the chemical at its
current temperature, [dimensionless]. Does not include
pressure-compensation from an equation of state.
.. math::
k = \frac{C_p}{C_v}
Examples
--------
>>> Chemical('hydrogen').isentropic_exponent
1.405237786321222
'''
Cp, Cv = self.Cpg, self.Cvg
if all((Cp, Cv)):
return isentropic_exponent(Cp, Cv)
return None
@property
def Vms(self):
r'''Solid-phase molar volume of the chemical at its current
temperature, in units of [m^3/mol]. For calculation of this property at
other temperatures, or specifying manually the method used to calculate
it, and more - see the object oriented interface
:obj:`thermo.volume.VolumeSolid`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
>>> Chemical('iron').Vms
7.09593392630242e-06
'''
return self.VolumeSolid(self.T)
@property
def Vml(self):
r'''Liquid-phase molar volume of the chemical at its current
temperature and pressure, in units of [m^3/mol]. For calculation of this
property at other temperatures or pressures, or specifying manually the
method used to calculate it, and more - see the object oriented interface
:obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
>>> Chemical('cyclobutane', T=225).Vml
7.42395423425395e-05
'''
return self.VolumeLiquid(self.T, self.P)
@property
def Vmg(self):
r'''Gas-phase molar volume of the chemical at its current
temperature and pressure, in units of [m^3/mol]. For calculation of this
property at other temperatures or pressures, or specifying manually the
method used to calculate it, and more - see the object oriented interface
:obj:`thermo.volume.VolumeGas`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
Estimate the molar volume of the core of the sun, at 15 million K and
26.5 PetaPascals, assuming pure helium (actually 68% helium):
>>> Chemical('helium', T=15E6, P=26.5E15).Vmg
4.805464238181197e-07
'''
return self.VolumeGas(self.T, self.P)
@property
def Vmg_ideal(self):
r'''Gas-phase molar volume of the chemical at its current
temperature and pressure calculated with the ideal-gas law,
in units of [m^3/mol].
Examples
--------
>>> Chemical('helium', T=300.0, P=1e5).Vmg_ideal
0.0249433878544
'''
return ideal_gas(T=self.T, P=self.P)
@property
def rhos(self):
r'''Solid-phase mass density of the chemical at its current temperature,
in units of [kg/m^3]. For calculation of this property at
other temperatures, or specifying manually the method used
to calculate it, and more - see the object oriented interface
:obj:`thermo.volume.VolumeSolid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('iron').rhos
7869.999999999994
'''
Vms = self.Vms
if Vms:
return Vm_to_rho(Vms, self.MW)
return None
@property
def rhol(self):
r'''Liquid-phase mass density of the chemical at its current
temperature and pressure, in units of [kg/m^3]. For calculation of this
property at other temperatures and pressures, or specifying manually
the method used to calculate it, and more - see the object oriented
interface :obj:`thermo.volume.VolumeLiquid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('o-xylene', T=297).rhol
876.9946785618097
'''
Vml = self.Vml
if Vml:
return Vm_to_rho(Vml, self.MW)
return None
@property
def rhog(self):
r'''Gas-phase mass density of the chemical at its current temperature
and pressure, in units of [kg/m^3]. For calculation of this property at
other temperatures or pressures, or specifying manually the method used
to calculate it, and more - see the object oriented interface
:obj:`thermo.volume.VolumeGas`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
Estimate the density of the core of the sun, at 15 million K and
26.5 PetaPascals, assuming pure helium (actually 68% helium):
>>> Chemical('helium', T=15E6, P=26.5E15).rhog
8329.27226509739
Compared to a result on
`Wikipedia <https://en.wikipedia.org/wiki/Solar_core>`_ of 150000
kg/m^3, the fundamental equation of state performs poorly.
>>> He = Chemical('helium', T=15E6, P=26.5E15)
>>> He.VolumeGas.method_P = 'IDEAL'
>>> He.rhog
850477.8
The ideal-gas law performs somewhat better, but vastly overshoots
the density prediction.
'''
Vmg = self.Vmg
if Vmg:
return Vm_to_rho(Vmg, self.MW)
return None
@property
def rhosm(self):
r'''Molar density of the chemical in the solid phase at the
current temperature and pressure, in units of [mol/m^3].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeSolid` to perform the actual calculation of
molar volume.
Examples
--------
>>> Chemical('palladium').rhosm
112760.75925577903
'''
Vms = self.Vms
if Vms:
return 1./Vms
return None
@property
def rholm(self):
r'''Molar density of the chemical in the liquid phase at the
current temperature and pressure, in units of [mol/m^3].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeLiquid` to perform the actual calculation of
molar volume.
Examples
--------
>>> Chemical('nitrogen', T=70).rholm
29937.20179186975
'''
Vml = self.Vml
if Vml:
return 1./Vml
return None
@property
def rhogm(self):
r'''Molar density of the chemical in the gas phase at the
current temperature and pressure, in units of [mol/m^3].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeGas` to perform the actual calculation of
molar volume.
Examples
--------
>>> Chemical('tungsten hexafluoride').rhogm
42.01349946063116
'''
Vmg = self.Vmg
if Vmg:
return 1./Vmg
return None
@property
def Zs(self):
r'''Compressibility factor of the chemical in the solid phase at the
current temperature and pressure, [dimensionless].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeSolid` to perform the actual calculation of
molar volume.
Examples
--------
>>> Chemical('palladium').Zs
0.00036248477437931853
'''
Vms = self.Vms
if Vms:
return Z(self.T, self.P, Vms)
return None
@property
def Zl(self):
r'''Compressibility factor of the chemical in the liquid phase at the
current temperature and pressure, [dimensionless].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeLiquid` to perform the actual calculation of
molar volume.
Examples
--------
>>> Chemical('water').Zl
0.0007385375470263454
'''
Vml = self.Vml
if Vml:
return Z(self.T, self.P, Vml)
return None
@property
def Zg(self):
r'''Compressibility factor of the chemical in the gas phase at the
current temperature and pressure, [dimensionless].
Utilizes the object oriented interface and
:obj:`thermo.volume.VolumeGas` to perform the actual calculation of
molar volume.
Examples
--------
>>> Chemical('sulfur hexafluoride', T=700, P=1E9).Zg
11.140084184207813
'''
Vmg = self.Vmg
if Vmg:
return Z(self.T, self.P, Vmg)
return None
@property
def SGs(self):
r'''Specific gravity of the solid phase of the chemical at the
specified temperature and pressure, [dimensionless].
The reference condition is water at 4 °C and 1 atm
(rho=999.017 kg/m^3). The SG varies with temperature and pressure,
but only very slightly.
Examples
--------
>>> Chemical('iron').SGs
7.87774317235069
'''
rhos = self.rhos
if rhos is not None:
return SG(rhos)
return None
@property
def SGl(self):
r'''Specific gravity of the liquid phase of the chemical at the
specified temperature and pressure, [dimensionless].
The reference condition is water at 4 °C and 1 atm
(rho=999.017 kg/m^3). For liquids, SG is defined that the reference
chemical's T and P are fixed, but the chemical itself varies with
the specified T and P.
Examples
--------
>>> Chemical('water', T=365).SGl
0.9650065522428539
'''
rhol = self.rhol
if rhol is not None:
return SG(rhol)
return None
@property
def SGg(self):
r'''Specific gravity of the gas phase of the chemical, [dimensionless].
The reference condition is air at 15.6 °C (60 °F) and 1 atm
(rho=1.223 kg/m^3). The definition for gases uses the compressibility
factor of the reference gas and the chemical both at the reference
conditions, not the conditions of the chemical.
Examples
--------
>>> Chemical('argon').SGg
1.3795835970877504
'''
Vmg = self.VolumeGas(T=288.70555555555552, P=101325)
if Vmg:
rho = Vm_to_rho(Vmg, self.MW)
return SG(rho, rho_ref=1.2231876628642968) # calculated with Mixture
return None
@property
def API(self):
r'''API gravity of the liquid phase of the chemical, [degrees].
The reference condition is water at 15.6 °C (60 °F) and 1 atm
(rho=999.016 kg/m^3, standardized).
Examples
--------
>>> Chemical('water').API
9.999752435378895
'''
Vml = self.VolumeLiquid(T=288.70555555555552, P=101325)
if Vml:
rho = Vm_to_rho(Vml, self.MW)
sg = SG(rho, rho_ref=999.016)
return SG_to_API(sg)
return None
@property
def Bvirial(self):
r'''Second virial coefficient of the gas phase of the chemical at its
current temperature and pressure, in units of [m^3/mol].
This property uses the object-oriented interface
:obj:`thermo.volume.VolumeGas`, converting its result with
:obj:`thermo.utils.B_from_Z`.
Examples
--------
>>> Chemical('water').Bvirial
-0.0009596286322838357
'''
if self.Vmg:
return B_from_Z(self.Zg, self.T, self.P)
return None
@property
def isobaric_expansion_l(self):
r'''Isobaric (constant-pressure) expansion of the liquid phase of the
chemical at its current temperature and pressure, in units of [1/K].
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Utilizes the temperature-derivative method of
:obj:`thermo.volume.VolumeLiquid` to perform the actual calculation.
The derivatives are all numerical.
Examples
--------
>>> Chemical('dodecane', T=400).isobaric_expansion_l
0.0011617555762469477
'''
dV_dT = self.VolumeLiquid.TP_dependent_property_derivative_T(self.T, self.P)
Vm = self.Vml
if dV_dT and Vm:
return isobaric_expansion(V=Vm, dV_dT=dV_dT)
@property
def isobaric_expansion_g(self):
r'''Isobaric (constant-pressure) expansion of the gas phase of the
chemical at its current temperature and pressure, in units of [1/K].
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Utilizes the temperature-derivative method of
:obj:`thermo.volume.VolumeGas` to perform the actual calculation.
The derivatives are all numerical.
Examples
--------
>>> Chemical('Hexachlorobenzene', T=900).isobaric_expansion_g
0.001151869741981048
'''
dV_dT = self.VolumeGas.TP_dependent_property_derivative_T(self.T, self.P)
Vm = self.Vmg
if dV_dT and Vm:
return isobaric_expansion(V=Vm, dV_dT=dV_dT)
@property
def mul(self):
r'''Viscosity of the chemical in the liquid phase at its current
temperature and pressure, in units of [Pa*s].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.viscosity.ViscosityLiquid`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water', T=320).mul
0.0005767262693751547
'''
return self.ViscosityLiquid(self.T, self.P)
@property
def mug(self):
r'''Viscosity of the chemical in the gas phase at its current
temperature and pressure, in units of [Pa*s].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.viscosity.ViscosityGas`; each Chemical instance
creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water', T=320, P=100).mug
1.0431450856297212e-05
'''
return self.ViscosityGas(self.T, self.P)
@property
def kl(self):
r'''Thermal conductivity of the chemical in the liquid phase at its
current temperature and pressure, in units of [W/m/K].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.thermal_conductivity.ThermalConductivityLiquid`; each
Chemical instance creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water', T=320).kl
0.6369957248212118
'''
return self.ThermalConductivityLiquid(self.T, self.P)
@property
def kg(self):
r'''Thermal conductivity of the chemical in the gas phase at its
current temperature and pressure, in units of [W/m/K].
For calculation of this property at other temperatures and pressures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface
:obj:`thermo.thermal_conductivity.ThermalConductivityGas`; each
Chemical instance creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water', T=320).kg
0.021273128263091207
'''
return self.ThermalConductivityGas(self.T, self.P)
@property
def sigma(self):
r'''Surface tension of the chemical at its current temperature, in
units of [N/m].
For calculation of this property at other temperatures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface :obj:`thermo.interface.SurfaceTension`;
each Chemical instance creates one to actually perform the calculations.
Examples
--------
>>> Chemical('water', T=320).sigma
0.06855002575793023
>>> Chemical('water', T=320).SurfaceTension.solve_property(0.05)
416.8307110842183
'''
return self.SurfaceTension(self.T)
@property
def permittivity(self):
r'''Relative permittivity (dielectric constant) of the chemical at its
current temperature, [dimensionless].
For calculation of this property at other temperatures,
or specifying manually the method used to calculate it, and more - see
the object oriented interface :obj:`thermo.permittivity.PermittivityLiquid`;
each Chemical instance creates one to actually perform the calculations.
Examples
--------
>>> Chemical('toluene', T=250).permittivity
2.49775625
'''
return self.Permittivity(self.T)
@property
def absolute_permittivity(self):
r'''Absolute permittivity of the chemical at its current temperature,
in units of [farad/meter]. Those units are equivalent to
ampere^2*second^4/kg/m^3.
Examples
--------
>>> Chemical('water', T=293.15).absolute_permittivity
7.096684821859018e-10
'''
permittivity = self.permittivity
if permittivity is not None:
return permittivity*epsilon_0
return None
@property
def JTl(self):
r'''Joule Thomson coefficient of the chemical in the liquid phase at
its current temperature and pressure, in units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Utilizes the temperature-derivative method of
:obj:`thermo.volume.VolumeLiquid` and the temperature-dependent heat
capacity method :obj:`thermo.heat_capacity.HeatCapacityLiquid` to
obtain the properties required for the actual calculation.
Examples
--------
>>> Chemical('dodecane', T=400).JTl
-3.0827160465192742e-07
'''
Vml, Cplm, isobaric_expansion_l = self.Vml, self.Cplm, self.isobaric_expansion_l
if all((Vml, Cplm, isobaric_expansion_l)):
return Joule_Thomson(T=self.T, V=Vml, Cp=Cplm, beta=isobaric_expansion_l)
return None
@property
def JTg(self):
r'''Joule Thomson coefficient of the chemical in the gas phase at
its current temperature and pressure, in units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Utilizes the temperature-derivative method of
:obj:`thermo.volume.VolumeGas` and the temperature-dependent heat
capacity method :obj:`thermo.heat_capacity.HeatCapacityGas` to
obtain the properties required for the actual calculation.
Examples
--------
>>> Chemical('dodecane', T=400, P=1000).JTg
5.4089897835384913e-05
'''
Vmg, Cpgm, isobaric_expansion_g = self.Vmg, self.Cpgm, self.isobaric_expansion_g
if all((Vmg, Cpgm, isobaric_expansion_g)):
return Joule_Thomson(T=self.T, V=Vmg, Cp=Cpgm, beta=isobaric_expansion_g)
return None
@property
def nul(self):
r'''Kinematic viscosity of the liquid phase of the chemical at its
current temperature and pressure, in units of [m^2/s].
.. math::
\nu = \frac{\mu}{\rho}
Utilizes the temperature and pressure dependent object oriented
interfaces :obj:`thermo.volume.VolumeLiquid`,
:obj:`thermo.viscosity.ViscosityLiquid` to calculate the
actual properties.
Examples
--------
>>> Chemical('methane', T=110).nul
2.858088468937331e-07
'''
mul, rhol = self.mul, self.rhol
if all([mul, rhol]):
return nu_mu_converter(mu=mul, rho=rhol)
return None
@property
def nug(self):
r'''Kinematic viscosity of the gas phase of the chemical at its
current temperature and pressure, in units of [m^2/s].
.. math::
\nu = \frac{\mu}{\rho}
Utilizes the temperature and pressure dependent object oriented
interfaces :obj:`thermo.volume.VolumeGas`,
:obj:`thermo.viscosity.ViscosityGas` to calculate the
actual properties.
Examples
--------
>>> Chemical('methane', T=115).nug
2.5056924327995865e-06
'''
mug, rhog = self.mug, self.rhog
if all([mug, rhog]):
return nu_mu_converter(mu=mug, rho=rhog)
return None
@property
def alphal(self):
r'''Thermal diffusivity of the liquid phase of the chemical at its
current temperature and pressure, in units of [m^2/s].
.. math::
\alpha = \frac{k}{\rho Cp}
Utilizes the temperature and pressure dependent object oriented
interfaces :obj:`thermo.volume.VolumeLiquid`,
:obj:`thermo.thermal_conductivity.ThermalConductivityLiquid`,
and :obj:`thermo.heat_capacity.HeatCapacityLiquid` to calculate the
actual properties.
Examples
--------
>>> Chemical('nitrogen', T=70).alphal
9.444949636299626e-08
'''
kl, rhol, Cpl = self.kl, self.rhol, self.Cpl
if all([kl, rhol, Cpl]):
return thermal_diffusivity(k=kl, rho=rhol, Cp=Cpl)
return None
@property
def alphag(self):
r'''Thermal diffusivity of the gas phase of the chemical at its
current temperature and pressure, in units of [m^2/s].
.. math::
\alpha = \frac{k}{\rho Cp}
Utilizes the temperature and pressure dependent object oriented
interfaces :obj:`thermo.volume.VolumeGas`,
:obj:`thermo.thermal_conductivity.ThermalConductivityGas`,
and :obj:`thermo.heat_capacity.HeatCapacityGas` to calculate the
actual properties.
Examples
--------
>>> Chemical('ammonia').alphag
1.6931865425158556e-05
'''
kg, rhog, Cpg = self.kg, self.rhog, self.Cpg
if all([kg, rhog, Cpg]):
return thermal_diffusivity(k=kg, rho=rhog, Cp=Cpg)
return None
@property
def Prl(self):
r'''Prandtl number of the liquid phase of the chemical at its
current temperature and pressure, [dimensionless].
.. math::
Pr = \frac{C_p \mu}{k}
Utilizes the temperature and pressure dependent object oriented
interfaces :obj:`thermo.viscosity.ViscosityLiquid`,
:obj:`thermo.thermal_conductivity.ThermalConductivityLiquid`,
and :obj:`thermo.heat_capacity.HeatCapacityLiquid` to calculate the
actual properties.
Examples
--------
>>> Chemical('nitrogen', T=70).Prl
2.7828214501488886
'''
Cpl, mul, kl = self.Cpl, self.mul, self.kl
if all([Cpl, mul, kl]):
return Prandtl(Cp=Cpl, mu=mul, k=kl)
return None
@property
def Prg(self):
r'''Prandtl number of the gas phase of the chemical at its
current temperature and pressure, [dimensionless].
.. math::
Pr = \frac{C_p \mu}{k}
Utilizes the temperature and pressure dependent object oriented
interfaces :obj:`thermo.viscosity.ViscosityGas`,
:obj:`thermo.thermal_conductivity.ThermalConductivityGas`,
and :obj:`thermo.heat_capacity.HeatCapacityGas` to calculate the
actual properties.
Examples
--------
>>> Chemical('NH3').Prg
0.847263731933008
'''
Cpg, mug, kg = self.Cpg, self.mug, self.kg
if all([Cpg, mug, kg]):
return Prandtl(Cp=Cpg, mu=mug, k=kg)
return None
@property
def solubility_parameter(self):
r'''Solubility parameter of the chemical at its
current temperature and pressure, in units of [Pa^0.5].
.. math::
\delta = \sqrt{\frac{\Delta H_{vap} - RT}{V_m}}
Calculated based on enthalpy of vaporization and molar volume.
Normally calculated at STP. For uses of this property, see
:obj:`thermo.solubility.solubility_parameter`.
Examples
--------
>>> Chemical('NH3').solubility_parameter
24766.329043856073
'''
try:
return solubility_parameter(T=self.T, Hvapm=self.Hvapm, Vml=self.Vml)
except Exception:
return None
@property
def Parachor(self):
r'''Parachor of the chemical at its
current temperature and pressure, in units of [N^0.25*m^2.75/mol].
.. math::
P = \frac{\sigma^{0.25} MW}{\rho_L - \rho_V}
Calculated based on surface tension, density of the liquid
phase, and molecular weight. For uses of this property, see
:obj:`thermo.utils.Parachor`.
The gas density is calculated using the ideal-gas law.
Examples
--------
>>> Chemical('octane').Parachor
6.2e-05
'''
sigma, rhol = self.sigma, self.rhol
rhog = Vm_to_rho(ideal_gas(T=self.T, P=self.P), MW=self.MW)
if all((sigma, rhol, rhog, self.MW)):
return Parachor(sigma=sigma, MW=self.MW, rhol=rhol, rhog=rhog)
return None
### Single-phase properties
@property
def Cp(self):
r'''Mass heat capacity of the chemical at its current phase and
temperature, in units of [J/kg/K].
Utilizes the object oriented interfaces
:obj:`thermo.heat_capacity.HeatCapacitySolid`,
:obj:`thermo.heat_capacity.HeatCapacityLiquid`,
and :obj:`thermo.heat_capacity.HeatCapacityGas` to perform the
actual calculation of each property. Note that those interfaces provide
output in molar units (J/mol/K).
Examples
--------
>>> w = Chemical('water')
>>> w.Cp, w.phase
(4180.597021827336, 'l')
>>> Chemical('palladium').Cp
234.26767209171211
'''
return phase_select_property(phase=self.phase, s=Chemical.Cps, l=Chemical.Cpl, g=Chemical.Cpg, self=self)
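# Conceptually, `phase_select_property` dispatches to the property that
# matches the current phase; a rough sketch of the helper (an assumption
# for illustration, not its actual implementation):
#     prop = {'s': s, 'l': l, 'g': g}[phase]
#     return prop.fget(self)  # the properties are passed unbound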
@property
def Cpm(self):
r'''Molar heat capacity of the chemical at its current phase and
temperature, in units of [J/mol/K].
Utilizes the object oriented interfaces
:obj:`thermo.heat_capacity.HeatCapacitySolid`,
:obj:`thermo.heat_capacity.HeatCapacityLiquid`,
and :obj:`thermo.heat_capacity.HeatCapacityGas` to perform the
actual calculation of each property.
Examples
--------
>>> Chemical('cubane').Cpm
137.05489206785944
>>> Chemical('ethylbenzene', T=550, P=3E6).Cpm
294.18449553310046
'''
return phase_select_property(phase=self.phase, s=Chemical.Cpsm,
l=Chemical.Cplm, g=Chemical.Cpgm,
self=self)
@property
def Vm(self):
r'''Molar volume of the chemical at its current phase and
temperature and pressure, in units of [m^3/mol].
Utilizes the object oriented interfaces
:obj:`thermo.volume.VolumeSolid`,
:obj:`thermo.volume.VolumeLiquid`,
and :obj:`thermo.volume.VolumeGas` to perform the
actual calculation of each property.
Examples
--------
>>> Chemical('ethylbenzene', T=550, P=3E6).Vm
0.00017758024401627633
'''
return phase_select_property(phase=self.phase, s=Chemical.Vms,
l=Chemical.Vml, g=Chemical.Vmg, self=self)
@property
def rho(self):
r'''Mass density of the chemical at its current phase and
temperature and pressure, in units of [kg/m^3].
Utilizes the object oriented interfaces
:obj:`thermo.volume.VolumeSolid`,
:obj:`thermo.volume.VolumeLiquid`,
and :obj:`thermo.volume.VolumeGas` to perform the
actual calculation of each property. Note that those interfaces provide
output in units of m^3/mol.
Examples
--------
>>> Chemical('decane', T=550, P=2E6).rho
498.67008448640604
'''
return phase_select_property(phase=self.phase, s=Chemical.rhos,
l=Chemical.rhol, g=Chemical.rhog,
self=self)
@property
def rhom(self):
r'''Molar density of the chemical at its current phase and
temperature and pressure, in units of [mol/m^3].
Utilizes the object oriented interfaces
:obj:`thermo.volume.VolumeSolid`,
:obj:`thermo.volume.VolumeLiquid`,
and :obj:`thermo.volume.VolumeGas` to perform the
actual calculation of each property. Note that those interfaces provide
output in units of m^3/mol.
Examples
--------
>>> Chemical('1-hexanol').rhom
7983.414573003429
'''
return phase_select_property(phase=self.phase, s=Chemical.rhosm,
l=Chemical.rholm, g=Chemical.rhogm,
self=self)
@property
def Z(self):
r'''Compressibility factor of the chemical at its current phase and
temperature and pressure, [dimensionless].
Examples
--------
>>> Chemical('MTBE', T=900, P=1E-2).Z
0.9999999999079768
'''
Vm = self.Vm
if Vm:
return Z(self.T, self.P, Vm)
return None
@property
def SG(self):
r'''Specific gravity of the chemical, [dimensionless].
For gas-phase conditions, this is calculated at 15.6 °C (60 °F) and 1
atm for the chemical and the reference fluid, air.
For liquid and solid phase conditions, this is calculated based on a
reference fluid of water at 4 °C and 1 atm, but with the liquid or
solid chemical's density at the currently specified conditions.
Examples
--------
>>> Chemical('MTBE').SG
0.7428160596603596
'''
phase = self.phase
if phase == 'l':
return self.SGl
elif phase == 's':
return self.SGs
elif phase == 'g':
return self.SGg
rho = self.rho
if rho is not None:
return SG(rho)
return None
@property
def isobaric_expansion(self):
r'''Isobaric (constant-pressure) expansion of the chemical at its
current phase and temperature, in units of [1/K].
.. math::
\beta = \frac{1}{V}\left(\frac{\partial V}{\partial T} \right)_P
Examples
--------
Radical change in value just above and below the critical temperature
of water:
>>> Chemical('water', T=647.1, P=22048320.0).isobaric_expansion
0.34074205839222449
>>> Chemical('water', T=647.2, P=22048320.0).isobaric_expansion
0.18143324022215077
'''
return phase_select_property(phase=self.phase,
l=Chemical.isobaric_expansion_l,
g=Chemical.isobaric_expansion_g,
self=self)
@property
def JT(self):
r'''Joule Thomson coefficient of the chemical at its
current phase and temperature, in units of [K/Pa].
.. math::
\mu_{JT} = \left(\frac{\partial T}{\partial P}\right)_H = \frac{1}{C_p}
\left[T \left(\frac{\partial V}{\partial T}\right)_P - V\right]
= \frac{V}{C_p}\left(\beta T-1\right)
Examples
--------
>>> Chemical('water').JT
-2.2150394958666407e-07
'''
return phase_select_property(phase=self.phase, l=Chemical.JTl,
g=Chemical.JTg, self=self)
@property
def mu(self):
r'''Viscosity of the chemical at its current phase, temperature, and
pressure in units of [Pa*s].
Utilizes the object oriented interfaces
:obj:`thermo.viscosity.ViscosityLiquid` and
:obj:`thermo.viscosity.ViscosityGas` to perform the
actual calculation of each property.
Examples
--------
>>> Chemical('ethanol', T=300).mu
0.001044526538460911
>>> Chemical('ethanol', T=400).mu
1.1853097849748217e-05
'''
return phase_select_property(phase=self.phase, l=Chemical.mul,
g=Chemical.mug, self=self)
@property
def k(self):
r'''Thermal conductivity of the chemical at its current phase,
temperature, and pressure in units of [W/m/K].
Utilizes the object oriented interfaces
:obj:`thermo.thermal_conductivity.ThermalConductivityLiquid` and
:obj:`thermo.thermal_conductivity.ThermalConductivityGas` to perform
the actual calculation of each property.
Examples
--------
>>> Chemical('ethanol', T=300).k
0.16313594741877802
>>> Chemical('ethanol', T=400).k
0.026019924109310026
'''
return phase_select_property(phase=self.phase, s=None, l=Chemical.kl,
g=Chemical.kg, self=self)
@property
def nu(self):
r'''Kinematic viscosity of the chemical at its current temperature,
pressure, and phase in units of [m^2/s].
.. math::
\nu = \frac{\mu}{\rho}
Examples
--------
>>> Chemical('argon').nu
1.3846930410865003e-05
'''
return phase_select_property(phase=self.phase, l=Chemical.nul,
g=Chemical.nug, self=self)
@property
def alpha(self):
r'''Thermal diffusivity of the chemical at its current temperature,
pressure, and phase in units of [m^2/s].
.. math::
\alpha = \frac{k}{\rho Cp}
Examples
--------
>>> Chemical('furfural').alpha
8.696537158635412e-08
'''
return phase_select_property(phase=self.phase, l=Chemical.alphal,
g=Chemical.alphag, self=self)
@property
def Pr(self):
r'''Prandtl number of the chemical at its current temperature,
pressure, and phase; [dimensionless].
.. math::
Pr = \frac{C_p \mu}{k}
Examples
--------
>>> Chemical('acetone').Pr
4.183039103542709
'''
return phase_select_property(phase=self.phase, l=Chemical.Prl,
g=Chemical.Prg, self=self)
@property
def Poynting(self):
r'''Poynting correction factor [dimensionless] for use in phase
equilibria methods based on activity coefficients or other reference
states. Performs the shortcut calculation assuming molar volume is
independent of pressure.
.. math::
\text{Poy} = \exp\left[\frac{V_l (P-P^{sat})}{RT}\right]
The full calculation normally returns values very close to the
approximate ones. This property is defined in terms of
pure components only.
Examples
--------
>>> Chemical('pentane', T=300, P=1E7).Poynting
1.5743051250679803
Notes
-----
The full equation shown below can be used as follows:
.. math::
\text{Poy} = \exp\left[\frac{\int_{P_i^{sat}}^P V_i^l dP}{RT}\right]
>>> from scipy.integrate import quad
>>> c = Chemical('pentane', T=300, P=1E7)
>>> exp(quad(lambda P : c.VolumeLiquid(c.T, P), c.Psat, c.P)[0]/R/c.T)
1.5821826990975127
'''
Vml, Psat = self.Vml, self.Psat
if Vml and Psat:
return exp(Vml*(self.P-Psat)/R/self.T)
return None
def Tsat(self, P):
r'''Saturation temperature of the chemical at pressure `P`, [K];
the inverse of `Psat`, solved via `VaporPressure.solve_property`.'''
return self.VaporPressure.solve_property(P)
### Convenience Dimensionless numbers
def Reynolds(self, V=None, D=None):
return Reynolds(V=V, D=D, rho=self.rho, mu=self.mu)
def Capillary(self, V=None):
return Capillary(V=V, mu=self.mu, sigma=self.sigma)
def Weber(self, V=None, D=None):
return Weber(V=V, L=D, rho=self.rho, sigma=self.sigma)
def Bond(self, L=None):
return Bond(rhol=self.rhol, rhog=self.rhog, sigma=self.sigma, L=L)
def Jakob(self, Tw=None):
return Jakob(Cp=self.Cp, Hvap=self.Hvap, Te=Tw-self.T)
def Grashof(self, Tw=None, L=None):
return Grashof(L=L, beta=self.isobaric_expansion, T1=Tw, T2=self.T,
rho=self.rho, mu=self.mu)
def Peclet_heat(self, V=None, D=None):
return Peclet_heat(V=V, L=D, rho=self.rho, Cp=self.Cp, k=self.k)
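# A minimal usage sketch for the convenience dimensionless-number methods
# above (an illustration, not part of the original file; the velocity V
# and diameter D values are hypothetical):
# >>> w = Chemical('water')
# >>> Re = w.Reynolds(V=2.0, D=0.05)     # pipe flow: rho*V*D/mu
# >>> Pe = w.Peclet_heat(V=2.0, D=0.05)  # equals Re*w.Pr for heat transfer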
|
CalebBell/thermo
|
thermo/chemical.py
|
Python
|
mit
| 123,245
|
[
"RDKit"
] |
b12635671646bbba45297cfdbe044d96539e862e23a31e22fb80af73a589a046
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Autoregressive distribution."""
import warnings
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.util.seed_stream import SeedStream
from tensorflow_probability.python.util.seed_stream import TENSOR_SEED_MSG_PREFIX
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
# Cause all warnings to always be triggered.
# Not having this means subsequent calls won't trigger the warning.
warnings.filterwarnings('always',
module='tensorflow_probability.*autoregressive',
append=True) # Don't override user-set filters.
class Autoregressive(distribution.Distribution):
"""Autoregressive distributions.
The Autoregressive distribution enables learning (often) richer multivariate
distributions by repeatedly applying a [diffeomorphic](
https://en.wikipedia.org/wiki/Diffeomorphism) transformation (such as
implemented by `Bijector`s). Regarding terminology,
'Autoregressive models decompose the joint density as a product of
conditionals, and model each conditional in turn. Normalizing flows
transform a base density (e.g. a standard Gaussian) into the target density
by an invertible transformation with tractable Jacobian.' [(Papamakarios et
al., 2017)][1]
In other words, the 'autoregressive property' is equivalent to the
decomposition, `p(x) = prod{ p(x[i] | x[0:i]) : i=0, ..., d }`. The provided
`shift_and_log_scale_fn`, `masked_autoregressive_default_template`, achieves
this property by zeroing out weights in its `masked_dense` layers.
Practically speaking the autoregressive property means that there exists a
permutation of the event coordinates such that each coordinate is a
diffeomorphic function of only preceding coordinates
[(van den Oord et al., 2016)][2].
#### Mathematical Details
The probability function is
```none
prob(x; fn, n) = fn(x).prob(x)
```
And a sample is generated by
```none
x = fn(...fn(fn(x0).sample()).sample()).sample()
```
where the ellipses (`...`) represent `n-2` composed calls to `fn`, `fn`
constructs a `tfd.Distribution`-like instance, and `x0` is a
fixed initializing `Tensor`.
#### Examples
```python
tfd = tfp.distributions
tfb = tfp.bijectors
def _normal_fn(event_size):
n = event_size * (event_size + 1) // 2
p = tf.Variable(tfd.Normal(loc=0., scale=1.).sample(n))
affine = tfb.FillScaleTriL(tfp.math.fill_triangular(0.25 * p))
def _fn(samples):
scale = tf.exp(affine(samples))
return tfd.Independent(
tfd.Normal(loc=0., scale=scale, validate_args=True),
reinterpreted_batch_ndims=1)
return _fn
batch_and_event_shape = [3, 2, 4]
sample0 = tf.zeros(batch_and_event_shape)
ar = tfd.Autoregressive(
_normal_fn(batch_and_event_shape[-1]), sample0)
x = ar.sample([6, 5])
# ==> x.shape = [6, 5, 3, 2, 4]
prob_x = ar.prob(x)
# ==> x.shape = [6, 5, 3, 2]
```
#### References
[1]: George Papamakarios, Theo Pavlakou, and Iain Murray. Masked
Autoregressive Flow for Density Estimation. In _Neural Information
Processing Systems_, 2017. https://arxiv.org/abs/1705.07057
[2]: Aaron van den Oord, Nal Kalchbrenner, Oriol Vinyals, Lasse Espeholt,
Alex Graves, and Koray Kavukcuoglu. Conditional Image Generation with
PixelCNN Decoders. In _Neural Information Processing Systems_, 2016.
https://arxiv.org/abs/1606.05328
"""
def __init__(self,
distribution_fn,
sample0=None,
num_steps=None,
validate_args=False,
allow_nan_stats=True,
name='Autoregressive'):
"""Construct an `Autoregressive` distribution.
Args:
distribution_fn: Python `callable` which constructs a
`tfd.Distribution`-like instance from a `Tensor` (e.g.,
`sample0`). The function must respect the 'autoregressive property',
i.e., there exists a permutation of event such that each coordinate is a
diffeomorphic function of only preceding coordinates.
sample0: Initial input to `distribution_fn`; used to
build the distribution in `__init__` which in turn specifies this
distribution's properties, e.g., `event_shape`, `batch_shape`, `dtype`.
If unspecified, then `distribution_fn` should be default constructable.
num_steps: Number of times `distribution_fn` is composed from samples,
e.g., `num_steps=2` implies
`distribution_fn(distribution_fn(sample0).sample(n)).sample()`.
validate_args: Python `bool`. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value '`NaN`' to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Default value: 'Autoregressive'.
Raises:
ValueError: if `num_steps < 1`.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
self._distribution_fn = distribution_fn
self._sample0 = tensor_util.convert_nonref_to_tensor(sample0)
self._num_steps = tensor_util.convert_nonref_to_tensor(
num_steps, dtype_hint=tf.int32)
# We need to call `distribution_fn` once here to determine the `dtype`
# and `reparameterization_type` of this distribution. We don't otherwise
# use the resulting `distribution0`, so this is '`tf.Variable` safe'
# as long as `distribution_fn` returns `tfd.Distribution` instances with
# consistent `dtype` and `reparameterization_type`.
if self._sample0 is not None:
distribution0 = self._distribution_fn(self._sample0)
else:
distribution0 = self._distribution_fn()
super(Autoregressive, self).__init__(
dtype=distribution0.dtype,
reparameterization_type=distribution0.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@property
def distribution_fn(self):
return self._distribution_fn
@property
def sample0(self):
return self._sample0
@property
def num_steps(self):
if self._num_steps is None:
return self._num_steps_deprecated_behavior()
return self._num_steps
@property
def experimental_is_sharded(self):
return self._get_distribution0().experimental_is_sharded
@deprecation.deprecated(
'2020-02-15',
'The `num_steps` property will return `None` when the distribution is '
'constructed with `num_steps=None`. Use '
'`tf.reduce_prod(event_shape_tensor())` instead.',
warn_once=True)
def _num_steps_deprecated_behavior(self):
distribution0 = self._get_distribution0()
num_steps_static = tensorshape_util.num_elements(distribution0.event_shape)
if num_steps_static is not None:
return num_steps_static
return tf.reduce_prod(distribution0.event_shape_tensor())
@property
@deprecation.deprecated(
'2020-02-15',
'The `distribution0` property is deprecated. '
'Use `distribution_fn()` or `distribution_fn(sample0)` instead.',
warn_once=True)
def distribution0(self):
return self._get_distribution0()
def _get_distribution0(self):
if self._sample0 is not None:
ret = self._distribution_fn(self._sample0)
else:
ret = self._distribution_fn()
if ret.dtype != self.dtype:
raise ValueError(
'`distribution_fn` returned distributions with different dtype -- '
'previously {} and now {}'.format(self.dtype, ret.dtype))
if ret.reparameterization_type != self.reparameterization_type:
raise ValueError(
'`distribution_fn` returned distributions with different '
'reparameterize_type -- previously {} and now {}'.format(
self.reparameterization_type, ret.reparameterization_type))
return ret
def _batch_shape(self):
# NOTE: The batch shape of the output of `self._distribution_fn(...)` could
# depend on values (or the shape of such values) read from variables during
# the execution of `distribution_fn`. Thus, in general, we cannot
# statically determine the batch shape here.
#
# Also, `self._distribution_fn(...)` could have graph side effects.
return tf.TensorShape(None)
def _batch_shape_tensor(self):
return self._get_distribution0().batch_shape_tensor()
def _event_shape(self):
# NOTE: The event shape of the output of `self._distribution_fn(...)` could
# depend on values (or the shape of such values) read from variables during
# the execution of `distribution_fn`. Thus, in general, we cannot
# statically determine the event shape here.
#
# Also, `self._distribution_fn(...)` could have graph side effects.
return tf.TensorShape(None)
def _event_shape_tensor(self):
return self._get_distribution0().event_shape_tensor()
def _sample_n(self, n, seed=None):
distribution0 = self._get_distribution0()
if self._num_steps is not None:
num_steps = tf.convert_to_tensor(self._num_steps)
num_steps_static = tf.get_static_value(num_steps)
else:
num_steps_static = tensorshape_util.num_elements(
distribution0.event_shape)
if num_steps_static is None:
num_steps = tf.reduce_prod(distribution0.event_shape_tensor())
stateless_seed = samplers.sanitize_seed(seed, salt='Autoregressive')
stateful_seed = None
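# Prefer stateless sampling: try a Tensor-valued seed first; if the
# distributions built by `distribution_fn` only accept stateful (int)
# seeds, the TypeError below triggers the `SeedStream` fallback.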
try:
samples = distribution0.sample(n, seed=stateless_seed)
is_stateful_sampler = False
except TypeError as e:
if ('Expected int for argument' not in str(e) and
TENSOR_SEED_MSG_PREFIX not in str(e)):
raise
msg = (
'Falling back to stateful sampling for `distribution_fn(sample0)` of '
'type `{}`. Please update to use `tf.random.stateless_*` RNGs. '
'This fallback may be removed after 20-Aug-2020. ({})')
warnings.warn(msg.format(type(distribution0), str(e)))
stateful_seed = SeedStream(seed, salt='Autoregressive')()
samples = distribution0.sample(n, seed=stateful_seed)
is_stateful_sampler = True
seed = stateful_seed if is_stateful_sampler else stateless_seed
if num_steps_static is not None:
for _ in range(num_steps_static):
# pylint: disable=not-callable
samples = self.distribution_fn(samples).sample(seed=seed)
else:
# pylint: disable=not-callable
samples = tf.foldl(lambda s, _: self.distribution_fn(s).sample(seed=seed),
elems=tf.range(0, num_steps), initializer=samples)
return samples
def _log_prob(self, value):
# pylint: disable=not-callable
return self.distribution_fn(value).log_prob(value)
def _prob(self, value):
# pylint: disable=not-callable
return self.distribution_fn(value).prob(value)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if self._num_steps is not None:
if is_init != tensor_util.is_ref(self._num_steps):
assertions.append(assert_util.assert_rank(
self._num_steps, 0,
message='Argument `num_steps` must be a scalar'))
assertions.append(assert_util.assert_positive(
self._num_steps, message='Argument `num_steps` must be positive'))
return assertions
def _default_event_space_bijector(self):
return self._get_distribution0().experimental_default_event_space_bijector()
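# A minimal end-to-end sketch (an illustration, not part of this module;
# assumes `tensorflow_probability` is importable as `tfp`). Each step's
# scale depends on the previous sample, which satisfies the autoregressive
# property for a scalar event:
#
#   import tensorflow.compat.v2 as tf
#   import tensorflow_probability as tfp
#   tfd = tfp.distributions
#   def _fn(sample=0.):
#     return tfd.Normal(loc=0., scale=1. + tf.abs(sample))
#   ar = tfd.Autoregressive(_fn, sample0=tf.zeros([]), num_steps=3)
#   x = ar.sample()      # three composed calls to `_fn`
#   lp = ar.log_prob(x)  # evaluates `_fn(x).log_prob(x)`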
|
tensorflow/probability
|
tensorflow_probability/python/distributions/autoregressive.py
|
Python
|
apache-2.0
| 12,954
|
[
"Gaussian"
] |
cdedec84836812b95c6bb753b1b83db4c21ab45d547d1c2c094b0dd7933291a6
|
import json
from django.conf import settings
from django.test import TestCase
from django.core.cache import cache
import mock
from nose.tools import ok_, eq_
from airmozilla.base import mozillians
from airmozilla.base.tests.testbase import Response
VOUCHED_FOR_USERS = """
{
"count": 1,
"next": null,
"results": [
{
"username": "peterbe",
"_url": "https://muzillians.fake/api/v2/users/99999/",
"is_vouched": true
}
],
"previous": null
}
"""
NO_USERS = """
{
"count": 0,
"next": null,
"results": [],
"previous": null
}
"""
VOUCHED_FOR = """
{
"photo": {
"300x300": "https://muzillians.fake/media/uplo...1caee0.jpg",
"150x150": "https://muzillians.fake/media/uplo...5636261.jpg",
"500x500": "https://muzillians.fake/media/uplo...6465a73.jpg",
"value": "https://muzillians.fake/media/uploa...71caee0.jpg",
"privacy": "Public"
},
"date_mozillian": {
"value": null,
"privacy": "Mozillians"
},
"full_name": {
"value": "Peter Bengtsson",
"privacy": "Public"
},
"title": {
"value": "",
"privacy": "Mozillians"
},
"external_accounts": [],
"alternate_emails": [],
"email": {
"value": "peterbe@mozilla.com",
"privacy": "Mozillians"
},
"username": "peterbe",
"is_public": true,
"url": "https://muzillians.fake/en-US/u/peterbe/",
"country": {
"code": "us",
"value": "United States",
"privacy": "Public"
},
"websites": [
{
"website": "http://www.peterbe.com/",
"privacy": "Public"
}
],
"_url": "https://muzillians.fake/api/v2/users/441/",
"story_link": {
"value": "",
"privacy": "Mozillians"
},
"ircname": {
"value": "peterbe",
"privacy": "Public"
},
"is_vouched": true
}
"""
NOT_VOUCHED_FOR = """
{
"photo": {
"300x300": "https://muzillians.fake/media/uplo...1caee0.jpg",
"150x150": "https://muzillians.fake/media/uplo...5636261.jpg",
"500x500": "https://muzillians.fake/media/uplo...6465a73.jpg",
"value": "https://muzillians.fake/media/uploa...71caee0.jpg",
"privacy": "Public"
},
"date_mozillian": {
"value": null,
"privacy": "Mozillians"
},
"full_name": {
"value": "Peter Bengtsson",
"privacy": "Private"
},
"title": {
"value": "",
"privacy": "Mozillians"
},
"alternate_emails": [],
"email": {
"value": "peterbe@mozilla.com",
"privacy": "Mozillians"
},
"username": "tmickel",
"bio": {
"html": "<p>Web developer at Mozilla</p>",
"value": "Web developer at Mozilla",
"privacy": "Public"
},
"is_public": true,
"url": "https://muzillians.fake/en-US/u/peterbe/",
"websites": [
{
"website": "http://www.peterbe.com/",
"privacy": "Public"
}
],
"_url": "https://muzillians.fake/api/v2/users/441/",
"story_link": {
"value": "",
"privacy": "Mozillians"
},
"ircname": {
"value": "peterbe",
"privacy": "Public"
},
"is_vouched": false
}
"""
VOUCHED_FOR_NO_USERNAME = """
{
"meta": {
"previous": null,
"total_count": 1,
"offset": 0,
"limit": 20,
"next": null
},
"objects": [
{
"website": "",
"bio": "",
"resource_uri": "/api/v1/users/2429/",
"last_updated": "2012-11-06T14:41:47",
"groups": [
"ugly tuna"
],
"city": "Casino",
"skills": [],
"country": "Albania",
"region": "Bush",
"id": "2429",
"languages": [],
"allows_mozilla_sites": true,
"photo": "http://www.gravatar.com/avatar/0409b497734934400822bb33...",
"is_vouched": true,
"email": "peterbe@gmail.com",
"ircname": "",
"allows_community_sites": true
}
]
}
"""
NOT_VOUCHED_FOR_USERS = """
{
"count": 1,
"next": null,
"results": [
{
"username": "tmickel@mit.edu",
"_url": "https://muzillians.fake/api/v2/users/00000/",
"is_vouched": false
}
],
"previous": null
}
"""
NO_VOUCHED_FOR = """
{
"meta": {
"previous": null,
"total_count": 0,
"offset": 0,
"limit": 20,
"next": null
},
"objects": []
}
"""
GROUPS1 = """
{
"count": 3,
"previous": null,
"results": [
{
"url": "https://muzillians.fake/en-US/group/9090909/",
"_url": "https://muzillians.fake/api/v2/groups/909090/",
"id": 12426,
"member_count": 3,
"name": "GROUP NUMBER 1"
},
{
"url": "https://muzillians.fake/en-US/group/2009-intern/",
"_url": "https://muzillians.fake/api/v2/groups/08080808/",
"id": 196,
"member_count": 7,
"name": "GROUP NUMBER 2"
}
],
"next": "https://muzillians.fake/api/v2/groups/?api-key=xxxkey&page=2"
}
"""
GROUPS2 = """
{
"count": 3,
"previous": "https://muzillians.fake/api/v2/groups/?api-key=xxxkey",
"results": [
{
"url": "https://muzillians.fake/en-US/group/2013summitassembly/",
"_url": "https://muzillians.fake/api/v2/groups/02020202/",
"id": 2002020,
"member_count": 53,
"name": "GROUP NUMBER 3"
}
],
"next": null
}
"""
assert json.loads(VOUCHED_FOR_USERS)
assert json.loads(VOUCHED_FOR)
assert json.loads(NOT_VOUCHED_FOR_USERS)
assert json.loads(NO_VOUCHED_FOR)
assert json.loads(GROUPS1)
assert json.loads(GROUPS2)
class TestMozillians(TestCase):
@mock.patch('requests.get')
def test_is_vouched(self, rget):
def mocked_get(url, **options):
if 'tmickel' in url:
return Response(NOT_VOUCHED_FOR_USERS)
if 'peterbe' in url:
return Response(VOUCHED_FOR_USERS)
if 'trouble' in url:
return Response('Failed', status_code=500)
raise NotImplementedError(url)
rget.side_effect = mocked_get
ok_(not mozillians.is_vouched('tmickel@mit.edu'))
ok_(mozillians.is_vouched('peterbe@gmail.com'))
self.assertRaises(
mozillians.BadStatusCodeError,
mozillians.is_vouched,
'trouble@live.com'
)
# also check that the API key is scrubbed
try:
mozillians.is_vouched('trouble@live.com')
raise AssertionError("shouldn't happen")
except mozillians.BadStatusCodeError as msg:
ok_(settings.MOZILLIANS_API_KEY not in str(msg))
@mock.patch('requests.get')
def test_is_not_vouched(self, rget):
def mocked_get(url, **options):
if 'tmickel' in url:
return Response(NOT_VOUCHED_FOR_USERS)
raise NotImplementedError(url)
rget.side_effect = mocked_get
ok_(not mozillians.is_vouched('tmickel@mit.edu'))
@mock.patch('requests.get')
def test_fetch_user_name(self, rget):
def mocked_get(url, **options):
if '/v2/users/99999' in url:
return Response(VOUCHED_FOR)
if '/v2/users/00000' in url:
return Response(NOT_VOUCHED_FOR)
if 'peterbe' in url:
return Response(VOUCHED_FOR_USERS)
if 'tmickel' in url:
return Response(NOT_VOUCHED_FOR_USERS)
raise NotImplementedError(url)
rget.side_effect = mocked_get
result = mozillians.fetch_user_name('peterbe@gmail.com')
eq_(result, 'Peter Bengtsson')
result = mozillians.fetch_user_name('tmickel@mit.edu')
eq_(result, None)
@mock.patch('requests.get')
def test_fetch_user_name_failure(self, rget):
"""if the fetching of a single user barfs it shouldn't reveal
the API key"""
def mocked_get(url, **options):
if 'peterbe' in url:
return Response(VOUCHED_FOR_USERS)
return Response('Failed', status_code=500)
rget.side_effect = mocked_get
try:
mozillians.fetch_user('peterbe@gmail.com')
raise AssertionError("shouldn't happen")
except mozillians.BadStatusCodeError as msg:
ok_(settings.MOZILLIANS_API_KEY not in str(msg))
ok_('xxxscrubbedxxx' in str(msg))
@mock.patch('requests.get')
def test_fetch_user_name_no_user_name(self, rget):
def mocked_get(url, **options):
if '/v2/users/99999' in url:
return Response(VOUCHED_FOR_NO_USERNAME)
if 'peterbe' in url and '/v2/users/' in url:
return Response(VOUCHED_FOR_USERS)
raise NotImplementedError(url)
rget.side_effect = mocked_get
result = mozillians.fetch_user_name('peterbe@gmail.com')
eq_(result, None)
@mock.patch('requests.get')
def test_in_group(self, rget):
def mocked_get(url, **options):
if 'peterbe' in url:
if 'group=losers' in url:
return Response(NO_USERS)
if 'group=winners' in url:
return Response(VOUCHED_FOR_USERS)
raise NotImplementedError(url)
rget.side_effect = mocked_get
ok_(not mozillians.in_group('peterbe@gmail.com', 'losers'))
ok_(mozillians.in_group('peterbe@gmail.com', 'winners'))
@mock.patch('requests.get')
def test_get_all_groups(self, rget):
calls = []
def mocked_get(url, **options):
calls.append(url)
if '/v2/groups/' in url and 'page=2' in url:
return Response(GROUPS2)
if '/v2/groups/' in url:
return Response(GROUPS1)
raise NotImplementedError(url)
rget.side_effect = mocked_get
groups = mozillians.get_all_groups()
eq_(len(groups), 3)
eq_(groups[0]['name'], 'GROUP NUMBER 1')
eq_(groups[1]['name'], 'GROUP NUMBER 2')
eq_(groups[2]['name'], 'GROUP NUMBER 3')
eq_(len(calls), 2)
@mock.patch('requests.get')
def test_get_all_groups_failure(self, rget):
def mocked_get(url, **options):
return Response('Failed', status_code=500)
rget.side_effect = mocked_get
try:
mozillians.get_all_groups()
raise AssertionError("shouldn't happen")
except mozillians.BadStatusCodeError as msg:
ok_(settings.MOZILLIANS_API_KEY not in str(msg))
ok_('xxxscrubbedxxx' in str(msg))
@mock.patch('requests.get')
def test_get_all_groups_cached(self, rget):
cache.clear()
calls = []
def mocked_get(url, **options):
calls.append(url)
if '/v2/groups/' in url and 'page=2' in url:
return Response(GROUPS2)
if '/v2/groups/' in url:
return Response(GROUPS1)
raise NotImplementedError(url)
rget.side_effect = mocked_get
groups = mozillians.get_all_groups_cached()
eq_(len(groups), 3)
eq_(len(calls), 2)
# a second time; should hit the cache and make no new requests
groups = mozillians.get_all_groups_cached()
eq_(len(groups), 3)
eq_(len(calls), 2)
|
kenrick95/airmozilla
|
airmozilla/base/tests/test_mozillians.py
|
Python
|
bsd-3-clause
| 10,869
|
[
"CASINO"
] |
db5f89a5d3080c8357f73870bb1eadd2006486affa3aca514b8d506cd4ae55c7
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tool for deploying apps to an app server.
Currently, the application only uploads new appversions. To do this, it first
walks the directory tree rooted at the path the user specifies, adding all the
files it finds to a list. It then uploads the application configuration
(app.yaml) to the server using HTTP, followed by uploading each of the files.
It then commits the transaction with another request.
The bulk of this work is handled by the AppVersionUpload class, which exposes
methods to add to the list of files, fetch a list of modified files, upload
files, and commit or rollback the transaction.
"""
import calendar
import datetime
import getpass
import logging
import mimetypes
import optparse
import os
import random
import re
import sha
import sys
import tempfile
import time
import urllib
import urllib2
import google
import yaml
from google.appengine.cron import groctimespecification
from google.appengine.api import appinfo
from google.appengine.api import croninfo
from google.appengine.api import dosinfo
from google.appengine.api import queueinfo
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_index
from google.appengine.tools import appengine_rpc
from google.appengine.tools import bulkloader
MAX_FILES_TO_CLONE = 100
LIST_DELIMITER = '\n'
TUPLE_DELIMITER = '|'
VERSION_FILE = '../VERSION'
UPDATE_CHECK_TIMEOUT = 3
NAG_FILE = '.appcfg_nag'
MAX_LOG_LEVEL = 4
MAX_BATCH_SIZE = 1000000
MAX_BATCH_COUNT = 100
MAX_BATCH_FILE_SIZE = 200000
BATCH_OVERHEAD = 500
verbosity = 1
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.RUNTIME] = 'python'
_api_versions = os.environ.get('GOOGLE_TEST_API_VERSIONS', '1')
_options = validation.Options(*_api_versions.split(','))
appinfo.AppInfoExternal.ATTRIBUTES[appinfo.API_VERSION] = _options
del _api_versions, _options
def StatusUpdate(msg):
"""Print a status message to stderr.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print >>sys.stderr, msg
def GetMimeTypeIfStaticFile(config, filename):
"""Looks up the mime type for 'filename'.
Uses the handlers in 'config' to determine if the file should
be treated as a static file.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
The mime type string. For example, 'text/plain' or 'image/gif'.
None if this is not a static file.
"""
for handler in config.handlers:
handler_type = handler.GetHandlerType()
if handler_type in ('static_dir', 'static_files'):
if handler_type == 'static_dir':
regex = os.path.join(re.escape(handler.GetHandler()), '.*')
else:
regex = handler.upload
if re.match(regex, filename):
if handler.mime_type is not None:
return handler.mime_type
else:
guess = mimetypes.guess_type(filename)[0]
if guess is None:
default = 'application/octet-stream'
print >>sys.stderr, ('Could not guess mimetype for %s. Using %s.'
% (filename, default))
return default
return guess
return None
def LookupErrorBlob(config, filename):
"""Looks up the mime type and error_code for 'filename'.
Uses the error handlers in 'config' to determine if the file should
be treated as an error blob.
Args:
config: The app.yaml object to check the filename against.
filename: The name of the file.
Returns:
    A tuple of (mime_type, error_code), or (None, None) if this is not an error
    blob. For example, ('text/plain', 'default') or ('image/gif', 'timeout') or
    (None, None).
"""
if not config.error_handlers:
return (None, None)
for error_handler in config.error_handlers:
if error_handler.file == filename:
error_code = error_handler.error_code
if not error_code:
error_code = 'default'
if error_handler.mime_type is not None:
return (error_handler.mime_type, error_code)
else:
guess = mimetypes.guess_type(filename)[0]
if guess is None:
default = 'application/octet-stream'
print >>sys.stderr, ('Could not guess mimetype for %s. Using %s.'
% (filename, default))
return (default, error_code)
return (guess, error_code)
return (None, None)
def BuildClonePostBody(file_tuples):
"""Build the post body for the /api/clone{files,blobs,errorblobs} urls.
Args:
file_tuples: A list of tuples. Each tuple should contain the entries
appropriate for the endpoint in question.
Returns:
A string containing the properly delimited tuples.
"""
file_list = []
for tup in file_tuples:
path = tup[0]
tup = tup[1:]
file_list.append(TUPLE_DELIMITER.join([path] + list(tup)))
return LIST_DELIMITER.join(file_list)
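# Illustrative only: with the delimiters defined above,
#   BuildClonePostBody([('a/b.py', 'hash1'), ('c.py', 'hash2')])
# returns 'a/b.py|hash1\nc.py|hash2'.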
class NagFile(validation.Validated):
"""A validated YAML class to represent the user's nag preferences.
Attributes:
timestamp: The timestamp of the last nag.
opt_in: True if the user wants to check for updates on dev_appserver
start. False if not. May be None if we have not asked the user yet.
"""
ATTRIBUTES = {
'timestamp': validation.TYPE_FLOAT,
'opt_in': validation.Optional(validation.TYPE_BOOL),
}
@staticmethod
def Load(nag_file):
"""Load a single NagFile object where one and only one is expected.
Args:
nag_file: A file-like object or string containing the yaml data to parse.
Returns:
A NagFile instance.
"""
return yaml_object.BuildSingleObject(NagFile, nag_file)
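# For reference (illustrative contents, not a real file): a nag file on disk
# is a small YAML document along the lines of
#   timestamp: 1262304000.0
#   opt_in: true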
def GetVersionObject(isfile=os.path.isfile, open_fn=open):
"""Gets the version of the SDK by parsing the VERSION file.
Args:
    isfile: Used for testing.
    open_fn: Used for testing.
Returns:
A Yaml object or None if the VERSION file does not exist.
"""
version_filename = os.path.join(os.path.dirname(google.__file__),
VERSION_FILE)
if not isfile(version_filename):
logging.error('Could not find version file at %s', version_filename)
return None
version_fh = open_fn(version_filename, 'r')
try:
version = yaml.safe_load(version_fh)
finally:
version_fh.close()
return version
def RetryWithBackoff(initial_delay, backoff_factor, max_delay, max_tries,
callable_func):
"""Calls a function multiple times, backing off more and more each time.
Args:
initial_delay: Initial delay after first try, in seconds.
backoff_factor: Delay will be multiplied by this factor after each try.
    max_delay: Maximum delay, in seconds; delays are capped at this value.
max_tries: Maximum number of tries.
callable_func: The method to call, will pass no arguments.
Returns:
    True if the function succeeded in one of its tries.
Raises:
Whatever the function raises--an exception will immediately stop retries.
"""
delay = initial_delay
if callable_func():
return True
while max_tries > 1:
StatusUpdate('Will check again in %s seconds.' % delay)
time.sleep(delay)
delay *= backoff_factor
if max_delay and delay > max_delay:
delay = max_delay
max_tries -= 1
if callable_func():
return True
return False
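# A minimal usage sketch (hypothetical helper, not part of the tool): poll a
# readiness check starting at a 1 second delay, doubling after each try and
# capping at 8 seconds, for at most 5 tries.
def _ExamplePollUntilReady(check_fn):
  """Illustrative only; True if check_fn() succeeded within 5 tries."""
  return RetryWithBackoff(1, 2, 8, 5, check_fn)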
def _VersionList(release):
"""Parse a version string into a list of ints.
Args:
release: The 'release' version, e.g. '1.2.4'.
(Due to YAML parsing this may also be an int or float.)
Returns:
A list of ints corresponding to the parts of the version string
between periods. Example:
'1.2.4' -> [1, 2, 4]
'1.2.3.4' -> [1, 2, 3, 4]
Raises:
ValueError if not all the parts are valid integers.
"""
return [int(part) for part in str(release).split('.')]
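# Illustrative only: comparing the parsed lists lexicographically matches
# release ordering, e.g. _VersionList('1.2.10') > _VersionList('1.2.4')
# because [1, 2, 10] > [1, 2, 4], whereas the raw strings would compare the
# other way around.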
class UpdateCheck(object):
"""Determines if the local SDK is the latest version.
Nags the user when there are updates to the SDK. As the SDK becomes
more out of date, the language in the nagging gets stronger. We
store a little yaml file in the user's home directory so that we nag
the user only once a week.
The yaml file has the following field:
'timestamp': Last time we nagged the user in seconds since the epoch.
Attributes:
server: An AbstractRpcServer instance used to check for the latest SDK.
config: The app's AppInfoExternal. Needed to determine which api_version
the app is using.
"""
def __init__(self,
server,
config,
isdir=os.path.isdir,
isfile=os.path.isfile,
open_fn=open):
"""Create a new UpdateCheck.
Args:
server: The AbstractRpcServer to use.
config: The yaml object that specifies the configuration of this
application.
isdir: Replacement for os.path.isdir (for testing).
isfile: Replacement for os.path.isfile (for testing).
open_fn: Replacement for the open builtin (for testing).
"""
self.server = server
self.config = config
self.isdir = isdir
self.isfile = isfile
self.open = open_fn
@staticmethod
def MakeNagFilename():
"""Returns the filename for the nag file for this user."""
user_homedir = os.path.expanduser('~/')
if not os.path.isdir(user_homedir):
drive, unused_tail = os.path.splitdrive(os.__file__)
if drive:
os.environ['HOMEDRIVE'] = drive
return os.path.expanduser('~/' + NAG_FILE)
def _ParseVersionFile(self):
"""Parse the local VERSION file.
Returns:
A Yaml object or None if the file does not exist.
"""
return GetVersionObject(isfile=self.isfile, open_fn=self.open)
def CheckSupportedVersion(self):
"""Determines if the app's api_version is supported by the SDK.
Uses the api_version field from the AppInfoExternal to determine if
the SDK supports that api_version.
Raises:
SystemExit if the api_version is not supported.
"""
version = self._ParseVersionFile()
if version is None:
logging.error('Could not determine if the SDK supports the api_version '
'requested in app.yaml.')
return
if self.config.api_version not in version['api_versions']:
logging.critical('The api_version specified in app.yaml (%s) is not '
'supported by this release of the SDK. The supported '
'api_versions are %s.',
self.config.api_version, version['api_versions'])
sys.exit(1)
def CheckForUpdates(self):
"""Queries the server for updates and nags the user if appropriate.
    Queries the server for the latest SDK version while reporting
    the local SDK version. The server will respond with a yaml document
containing the fields:
'release': The name of the release (e.g. 1.2).
'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
'api_versions': A list of api_version strings (e.g. ['1', 'beta']).
We will nag the user with increasing severity if:
- There is a new release.
- There is a new release with a new api_version.
- There is a new release that does not support the api_version named in
self.config.
"""
version = self._ParseVersionFile()
if version is None:
logging.info('Skipping update check')
return
logging.info('Checking for updates to the SDK.')
try:
response = self.server.Send('/api/updatecheck',
timeout=UPDATE_CHECK_TIMEOUT,
release=version['release'],
timestamp=version['timestamp'],
api_versions=version['api_versions'])
except urllib2.URLError, e:
logging.info('Update check failed: %s', e)
return
latest = yaml.safe_load(response)
if version['release'] == latest['release']:
logging.info('The SDK is up to date.')
return
try:
this_release = _VersionList(version['release'])
except ValueError:
logging.warn('Could not parse this release version (%r)',
version['release'])
else:
try:
advertised_release = _VersionList(latest['release'])
except ValueError:
logging.warn('Could not parse advertised release version (%r)',
latest['release'])
else:
if this_release > advertised_release:
logging.info('This SDK release is newer than the advertised release.')
return
api_versions = latest['api_versions']
if self.config.api_version not in api_versions:
self._Nag(
'The api version you are using (%s) is obsolete! You should\n'
'upgrade your SDK and test that your code works with the new\n'
'api version.' % self.config.api_version,
latest, version, force=True)
return
    if self.config.api_version != api_versions[-1]:
self._Nag(
'The api version you are using (%s) is deprecated. You should\n'
'upgrade your SDK to try the new functionality.' %
self.config.api_version, latest, version)
return
self._Nag('There is a new release of the SDK available.',
latest, version)
def _ParseNagFile(self):
"""Parses the nag file.
Returns:
A NagFile if the file was present else None.
"""
nag_filename = UpdateCheck.MakeNagFilename()
if self.isfile(nag_filename):
fh = self.open(nag_filename, 'r')
try:
nag = NagFile.Load(fh)
finally:
fh.close()
return nag
return None
def _WriteNagFile(self, nag):
"""Writes the NagFile to the user's nag file.
    If the nag file cannot be written, this method logs an error and
    returns without raising an exception.
Args:
nag: The NagFile to write.
"""
nagfilename = UpdateCheck.MakeNagFilename()
try:
fh = self.open(nagfilename, 'w')
try:
fh.write(nag.ToYAML())
finally:
fh.close()
except (OSError, IOError), e:
logging.error('Could not write nag file to %s. Error: %s', nagfilename, e)
def _Nag(self, msg, latest, version, force=False):
"""Prints a nag message and updates the nag file's timestamp.
    Because we don't want to nag the user every time, we store a simple
yaml document in the user's home directory. If the timestamp in this
doc is over a week old, we'll nag the user. And when we nag the user,
we update the timestamp in this doc.
Args:
msg: The formatted message to print to the user.
latest: The yaml document received from the server.
version: The local yaml version document.
force: If True, always nag the user, ignoring the nag file.
"""
nag = self._ParseNagFile()
if nag and not force:
last_nag = datetime.datetime.fromtimestamp(nag.timestamp)
if datetime.datetime.now() - last_nag < datetime.timedelta(weeks=1):
logging.debug('Skipping nag message')
return
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
self._WriteNagFile(nag)
print '****************************************************************'
print msg
print '-----------'
print 'Latest SDK:'
print yaml.dump(latest)
print '-----------'
print 'Your SDK:'
print yaml.dump(version)
print '-----------'
print 'Please visit http://code.google.com/appengine for the latest SDK'
print '****************************************************************'
def AllowedToCheckForUpdates(self, input_fn=raw_input):
"""Determines if the user wants to check for updates.
On startup, the dev_appserver wants to check for updates to the SDK.
Because this action reports usage to Google when the user is not
otherwise communicating with Google (e.g. pushing a new app version),
the user must opt in.
If the user does not have a nag file, we will query the user and
save the response in the nag file. Subsequent calls to this function
will re-use that response.
Args:
input_fn: used to collect user input. This is for testing only.
Returns:
True if the user wants to check for updates. False otherwise.
"""
nag = self._ParseNagFile()
if nag is None:
nag = NagFile()
nag.timestamp = time.time()
if nag.opt_in is None:
answer = input_fn('Allow dev_appserver to check for updates on startup? '
'(Y/n): ')
answer = answer.strip().lower()
if answer == 'n' or answer == 'no':
print ('dev_appserver will not check for updates on startup. To '
'change this setting, edit %s' % UpdateCheck.MakeNagFilename())
nag.opt_in = False
else:
print ('dev_appserver will check for updates on startup. To change '
'this setting, edit %s' % UpdateCheck.MakeNagFilename())
nag.opt_in = True
self._WriteNagFile(nag)
return nag.opt_in
class IndexDefinitionUpload(object):
"""Provides facilities to upload index definitions to the hosting service."""
def __init__(self, server, config, definitions):
"""Creates a new DatastoreIndexUpload.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
definitions: An IndexDefinitions object.
"""
self.server = server
self.config = config
self.definitions = definitions
def DoUpload(self):
"""Uploads the index definitions."""
StatusUpdate('Uploading index definitions.')
self.server.Send('/api/datastore/index/add',
app_id=self.config.application,
version=self.config.version,
payload=self.definitions.ToYAML())
class CronEntryUpload(object):
"""Provides facilities to upload cron entries to the hosting service."""
def __init__(self, server, config, cron):
"""Creates a new CronEntryUpload.
Args:
server: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
config: The AppInfoExternal object derived from the app.yaml file.
cron: The CronInfoExternal object loaded from the cron.yaml file.
"""
self.server = server
self.config = config
self.cron = cron
def DoUpload(self):
"""Uploads the cron entries."""
StatusUpdate('Uploading cron entries.')
self.server.Send('/api/cron/update',
app_id=self.config.application,
version=self.config.version,
payload=self.cron.ToYAML())
class QueueEntryUpload(object):
"""Provides facilities to upload task queue entries to the hosting service."""
def __init__(self, server, config, queue):
"""Creates a new QueueEntryUpload.
Args:
server: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer
config: The AppInfoExternal object derived from the app.yaml file.
queue: The QueueInfoExternal object loaded from the queue.yaml file.
"""
self.server = server
self.config = config
self.queue = queue
def DoUpload(self):
"""Uploads the task queue entries."""
StatusUpdate('Uploading task queue entries.')
self.server.Send('/api/queue/update',
app_id=self.config.application,
version=self.config.version,
payload=self.queue.ToYAML())
class DosEntryUpload(object):
"""Provides facilities to upload dos entries to the hosting service."""
def __init__(self, server, config, dos):
"""Creates a new DosEntryUpload.
Args:
server: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
dos: The DosInfoExternal object loaded from the dos.yaml file.
"""
self.server = server
self.config = config
self.dos = dos
def DoUpload(self):
"""Uploads the dos entries."""
StatusUpdate('Uploading DOS entries.')
self.server.Send('/api/dos/update',
app_id=self.config.application,
version=self.config.version,
payload=self.dos.ToYAML())
class DefaultVersionSet(object):
"""Provides facilities to set the default (serving) version."""
def __init__(self, server, config):
"""Creates a new DefaultVersionSet.
Args:
server: The RPC server to use. Should be an instance of a subclass of
AbstractRpcServer.
config: The AppInfoExternal object derived from the app.yaml file.
"""
self.server = server
self.config = config
def SetVersion(self):
"""Sets the default version."""
StatusUpdate('Setting default version to %s.' % (self.config.version,))
self.server.Send('/api/appversion/setdefault',
app_id=self.config.application,
version=self.config.version)
class IndexOperation(object):
"""Provide facilities for writing Index operation commands."""
def __init__(self, server, config):
"""Creates a new IndexOperation.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
"""
self.server = server
self.config = config
def DoDiff(self, definitions):
"""Retrieve diff file from the server.
Args:
      definitions: datastore_index.IndexDefinitions as loaded from the user's
        index.yaml file.
Returns:
A pair of datastore_index.IndexDefinitions objects. The first record
is the set of indexes that are present in the index.yaml file but missing
from the server. The second record is the set of indexes that are
present on the server but missing from the index.yaml file (indicating
that these indexes should probably be vacuumed).
"""
StatusUpdate('Fetching index definitions diff.')
response = self.server.Send('/api/datastore/index/diff',
app_id=self.config.application,
payload=definitions.ToYAML())
return datastore_index.ParseMultipleIndexDefinitions(response)
def DoDelete(self, definitions):
"""Delete indexes from the server.
Args:
definitions: Index definitions to delete from datastore.
Returns:
      A single datastore_index.IndexDefinitions containing indexes that were
not deleted, probably because they were already removed. This may
be normal behavior as there is a potential race condition between fetching
the index-diff and sending deletion confirmation through.
"""
StatusUpdate('Deleting selected index definitions.')
response = self.server.Send('/api/datastore/index/delete',
app_id=self.config.application,
payload=definitions.ToYAML())
return datastore_index.ParseIndexDefinitions(response)
class VacuumIndexesOperation(IndexOperation):
"""Provide facilities to request the deletion of datastore indexes."""
def __init__(self, server, config, force,
confirmation_fn=raw_input):
"""Creates a new VacuumIndexesOperation.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
force: True to force deletion of indexes, else False.
      confirmation_fn: Function used for getting input from the user.
"""
super(VacuumIndexesOperation, self).__init__(server, config)
self.force = force
self.confirmation_fn = confirmation_fn
def GetConfirmation(self, index):
"""Get confirmation from user to delete an index.
This method will enter an input loop until the user provides a
response it is expecting. Valid input is one of three responses:
y: Confirm deletion of index.
n: Do not delete index.
a: Delete all indexes without asking for further confirmation.
If the user enters nothing at all, the default action is to skip
    that index and not delete it.
If the user selects 'a', as a side effect, the 'force' flag is set.
Args:
index: Index to confirm.
Returns:
      True if the user enters 'y' or 'a'. False if the user enters 'n'.
"""
while True:
print 'This index is no longer defined in your index.yaml file.'
print
print index.ToYAML()
print
confirmation = self.confirmation_fn(
'Are you sure you want to delete this index? (N/y/a): ')
confirmation = confirmation.strip().lower()
if confirmation == 'y':
return True
elif confirmation == 'n' or not confirmation:
return False
elif confirmation == 'a':
self.force = True
return True
else:
print 'Did not understand your response.'
def DoVacuum(self, definitions):
"""Vacuum indexes in datastore.
This method will query the server to determine which indexes are not
being used according to the user's local index.yaml file. Once it has
made this determination, it confirms with the user which unused indexes
    should be deleted. Once confirmation for each index is received, it
deletes those indexes.
Because another user may in theory delete the same indexes at the same
    time as the user, there is a potential race condition. In these rare cases,
some of the indexes previously confirmed for deletion will not be found.
The user is notified which indexes these were.
Args:
      definitions: datastore_index.IndexDefinitions as loaded from the user's
index.yaml file.
"""
unused_new_indexes, notused_indexes = self.DoDiff(definitions)
deletions = datastore_index.IndexDefinitions(indexes=[])
if notused_indexes.indexes is not None:
for index in notused_indexes.indexes:
if self.force or self.GetConfirmation(index):
deletions.indexes.append(index)
if deletions.indexes:
not_deleted = self.DoDelete(deletions)
if not_deleted.indexes:
not_deleted_count = len(not_deleted.indexes)
if not_deleted_count == 1:
warning_message = ('An index was not deleted. Most likely this is '
'because it no longer exists.\n\n')
else:
warning_message = ('%d indexes were not deleted. Most likely this '
'is because they no longer exist.\n\n'
% not_deleted_count)
for index in not_deleted.indexes:
warning_message += index.ToYAML()
logging.warning(warning_message)
class LogsRequester(object):
"""Provide facilities to export request logs."""
def __init__(self, server, config, output_file,
num_days, append, severity, end, vhost, include_vhost,
include_all=None, time_func=time.time):
"""Constructor.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer
or TestRpcServer.
config: appinfo.AppInfoExternal configuration object.
output_file: Output file name.
      num_days: Number of days' worth of logs to export; 0 for all available.
append: True if appending to an existing file.
severity: App log severity to request (0-4); None for no app logs.
end: date object representing last day of logs to return.
vhost: The virtual host of log messages to get. None for all hosts.
include_vhost: If true, the virtual host is included in log messages.
include_all: If true, we add to the log message everything we know
about the request.
      time_func: Method that returns a timestamp representing now (for testing).
"""
self.server = server
self.config = config
self.output_file = output_file
self.append = append
self.num_days = num_days
self.severity = severity
self.vhost = vhost
self.include_vhost = include_vhost
self.include_all = include_all
self.version_id = self.config.version + '.1'
self.sentinel = None
self.write_mode = 'w'
if self.append:
self.sentinel = FindSentinel(self.output_file)
self.write_mode = 'a'
self.skip_until = False
now = PacificDate(time_func())
if end < now:
self.skip_until = end
else:
end = now
self.valid_dates = None
if self.num_days:
start = end - datetime.timedelta(self.num_days - 1)
self.valid_dates = (start, end)
def DownloadLogs(self):
"""Download the requested logs.
This will write the logs to the file designated by
self.output_file, or to stdout if the filename is '-'.
Multiple roundtrips to the server may be made.
"""
StatusUpdate('Downloading request logs for %s %s.' %
(self.config.application, self.version_id))
tf = tempfile.TemporaryFile()
last_offset = None
try:
while True:
try:
new_offset = self.RequestLogLines(tf, last_offset)
if not new_offset or new_offset == last_offset:
break
last_offset = new_offset
except KeyboardInterrupt:
StatusUpdate('Keyboard interrupt; saving data downloaded so far.')
break
StatusUpdate('Copying request logs to %r.' % self.output_file)
if self.output_file == '-':
of = sys.stdout
else:
try:
of = open(self.output_file, self.write_mode)
except IOError, err:
StatusUpdate('Can\'t write %r: %s.' % (self.output_file, err))
sys.exit(1)
try:
line_count = CopyReversedLines(tf, of)
finally:
of.flush()
if of is not sys.stdout:
of.close()
finally:
tf.close()
StatusUpdate('Copied %d records.' % line_count)
def RequestLogLines(self, tf, offset):
"""Make a single roundtrip to the server.
Args:
tf: Writable binary stream to which the log lines returned by
the server are written, stripped of headers, and excluding
lines skipped due to self.sentinel or self.valid_dates filtering.
offset: Offset string for a continued request; None for the first.
Returns:
The offset string to be used for the next request, if another
request should be issued; or None, if not.
"""
logging.info('Request with offset %r.', offset)
kwds = {'app_id': self.config.application,
'version': self.version_id,
'limit': 1000,
}
if offset:
kwds['offset'] = offset
if self.severity is not None:
kwds['severity'] = str(self.severity)
if self.vhost is not None:
kwds['vhost'] = str(self.vhost)
if self.include_vhost is not None:
kwds['include_vhost'] = str(self.include_vhost)
if self.include_all is not None:
kwds['include_all'] = str(self.include_all)
response = self.server.Send('/api/request_logs', payload=None, **kwds)
response = response.replace('\r', '\0')
lines = response.splitlines()
logging.info('Received %d bytes, %d records.', len(response), len(lines))
offset = None
if lines and lines[0].startswith('#'):
match = re.match(r'^#\s*next_offset=(\S+)\s*$', lines[0])
del lines[0]
if match:
offset = match.group(1)
if lines and lines[-1].startswith('#'):
del lines[-1]
valid_dates = self.valid_dates
sentinel = self.sentinel
skip_until = self.skip_until
len_sentinel = None
if sentinel:
len_sentinel = len(sentinel)
for line in lines:
if (sentinel and
line.startswith(sentinel) and
line[len_sentinel : len_sentinel+1] in ('', '\0')):
return None
linedate = DateOfLogLine(line)
if not linedate:
continue
if skip_until:
if linedate > skip_until:
continue
else:
self.skip_until = skip_until = False
if valid_dates and not valid_dates[0] <= linedate <= valid_dates[1]:
return None
tf.write(line + '\n')
if not lines:
return None
return offset
def DateOfLogLine(line):
"""Returns a date object representing the log line's timestamp.
Args:
line: a log line string.
Returns:
A date object representing the timestamp or None if parsing fails.
"""
m = re.compile(r'[^[]+\[(\d+/[A-Za-z]+/\d+):[^\d]*').match(line)
if not m:
return None
try:
return datetime.date(*time.strptime(m.group(1), '%d/%b/%Y')[:3])
except ValueError:
return None
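# Illustrative only: a request-log line such as
#   '127.0.0.1 - - [07/Mar/2010:12:34:56 -0800] "GET / HTTP/1.1" 200 ...'
# parses to datetime.date(2010, 3, 7); lines without the bracketed
# timestamp (e.g. app-log continuation lines) yield None.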
def PacificDate(now):
"""For a UTC timestamp, return the date in the US/Pacific timezone.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A date object representing what day it is in the US/Pacific timezone.
"""
return datetime.date(*time.gmtime(PacificTime(now))[:3])
def PacificTime(now):
"""Helper to return the number of seconds between UTC and Pacific time.
This is needed to compute today's date in Pacific time (more
specifically: Mountain View local time), which is how request logs
are reported. (Google servers always report times in Mountain View
local time, regardless of where they are physically located.)
This takes (post-2006) US DST into account. Pacific time is either
8 hours or 7 hours west of UTC, depending on whether DST is in
  effect. Since 2007, US DST starts on the second Sunday in March, and
  ends on the first Sunday in November. (Reference:
http://aa.usno.navy.mil/faq/docs/daylight_time.php.)
Note that the server doesn't report its local time (the HTTP Date
header uses UTC), and the client's local time is irrelevant.
Args:
now: A posix timestamp giving current UTC time.
Returns:
A pseudo-posix timestamp giving current Pacific time. Passing
this through time.gmtime() will produce a tuple in Pacific local
time.
"""
now -= 8*3600
if IsPacificDST(now):
now += 3600
return now
def IsPacificDST(now):
"""Helper for PacificTime to decide whether now is Pacific DST (PDT).
Args:
now: A pseudo-posix timestamp giving current time in PST.
Returns:
True if now falls within the range of DST, False otherwise.
"""
DAY = 24*3600
SUNDAY = 6
pst = time.gmtime(now)
year = pst[0]
assert year >= 2007
begin = calendar.timegm((year, 3, 8, 2, 0, 0, 0, 0, 0))
while time.gmtime(begin).tm_wday != SUNDAY:
begin += DAY
end = calendar.timegm((year, 11, 1, 2, 0, 0, 0, 0, 0))
while time.gmtime(end).tm_wday != SUNDAY:
end += DAY
return begin <= now < end
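# Worked example (illustrative): a July UTC timestamp falls inside the DST
# window computed above, so PacificTime() subtracts 8 hours and adds one
# back, a net UTC-7 (PDT); a January timestamp misses the window and keeps
# the full UTC-8 offset (PST).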
def CopyReversedLines(instream, outstream, blocksize=2**16):
r"""Copy lines from input stream to output stream in reverse order.
As a special feature, null bytes in the input are turned into
newlines followed by tabs in the output, but these 'sub-lines'
separated by null bytes are not reversed. E.g. If the input is
'A\0B\nC\0D\n', the output is 'C\n\tD\nA\n\tB\n'.
Args:
instream: A seekable stream open for reading in binary mode.
outstream: A stream open for writing; doesn't have to be seekable or binary.
blocksize: Optional block size for buffering, for unit testing.
Returns:
The number of lines copied.
"""
line_count = 0
instream.seek(0, 2)
last_block = instream.tell() // blocksize
spillover = ''
for iblock in xrange(last_block + 1, -1, -1):
instream.seek(iblock * blocksize)
data = instream.read(blocksize)
lines = data.splitlines(True)
lines[-1:] = ''.join(lines[-1:] + [spillover]).splitlines(True)
if lines and not lines[-1].endswith('\n'):
lines[-1] += '\n'
lines.reverse()
if lines and iblock > 0:
spillover = lines.pop()
if lines:
line_count += len(lines)
data = ''.join(lines).replace('\0', '\n\t')
outstream.write(data)
return line_count
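# A minimal sketch (hypothetical helper, not part of the tool) that runs the
# docstring example on in-memory streams.
def _ExampleReverseLog():
  """Illustrative only; exercises CopyReversedLines on StringIO buffers."""
  import StringIO  # local import so the illustration stays self-contained
  src = StringIO.StringIO('A\0B\nC\0D\n')
  dst = StringIO.StringIO()
  CopyReversedLines(src, dst)  # returns 2, the number of lines copied
  return dst.getvalue()  # 'C\n\tD\nA\n\tB\n'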
def FindSentinel(filename, blocksize=2**16):
"""Return the sentinel line from the output file.
Args:
filename: The filename of the output file. (We'll read this file.)
blocksize: Optional block size for buffering, for unit testing.
Returns:
The contents of the last line in the file that doesn't start with
a tab, with its trailing newline stripped; or None if the file
couldn't be opened or no such line could be found by inspecting
the last 'blocksize' bytes of the file.
"""
if filename == '-':
StatusUpdate('Can\'t combine --append with output to stdout.')
sys.exit(2)
try:
fp = open(filename, 'rb')
except IOError, err:
StatusUpdate('Append mode disabled: can\'t read %r: %s.' % (filename, err))
return None
try:
fp.seek(0, 2)
fp.seek(max(0, fp.tell() - blocksize))
lines = fp.readlines()
del lines[:1]
sentinel = None
for line in lines:
if not line.startswith('\t'):
sentinel = line
if not sentinel:
StatusUpdate('Append mode disabled: can\'t find sentinel in %r.' %
filename)
return None
return sentinel.rstrip('\n')
finally:
fp.close()
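# Illustrative only: in a previously downloaded log file, request lines start
# at column zero while their app-log sub-lines start with a tab (see
# CopyReversedLines), so the last untabbed line is a reliable sentinel for
# avoiding re-downloading records on --append.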
class UploadBatcher(object):
"""Helper to batch file uploads."""
def __init__(self, what, app_id, version, server):
"""Constructor.
Args:
what: Either 'file' or 'blob' or 'errorblob' indicating what kind of
objects this batcher uploads. Used in messages and URLs.
app_id: The application ID.
version: The application version string.
server: The RPC server.
"""
assert what in ('file', 'blob', 'errorblob'), repr(what)
self.what = what
self.app_id = app_id
self.version = version
self.server = server
self.single_url = '/api/appversion/add' + what
self.batch_url = self.single_url + 's'
self.batching = True
self.batch = []
self.batch_size = 0
def SendBatch(self):
"""Send the current batch on its way.
If successful, resets self.batch and self.batch_size.
Raises:
HTTPError with code=404 if the server doesn't support batching.
"""
boundary = 'boundary'
parts = []
for path, payload, mime_type in self.batch:
while boundary in payload:
boundary += '%04x' % random.randint(0, 0xffff)
assert len(boundary) < 80, 'Unexpected error, please try again.'
part = '\n'.join(['',
'X-Appcfg-File: %s' % urllib.quote(path),
'X-Appcfg-Hash: %s' % _Hash(payload),
'Content-Type: %s' % mime_type,
'Content-Length: %d' % len(payload),
'Content-Transfer-Encoding: 8bit',
'',
payload,
])
parts.append(part)
parts.insert(0,
'MIME-Version: 1.0\n'
'Content-Type: multipart/mixed; boundary="%s"\n'
'\n'
'This is a message with multiple parts in MIME format.' %
boundary)
parts.append('--\n')
delimiter = '\n--%s' % boundary
payload = delimiter.join(parts)
logging.info('Uploading batch of %d %ss to %s with boundary="%s".',
len(self.batch), self.what, self.batch_url, boundary)
self.server.Send(self.batch_url,
payload=payload,
content_type='message/rfc822',
app_id=self.app_id,
version=self.version)
self.batch = []
self.batch_size = 0
def SendSingleFile(self, path, payload, mime_type):
"""Send a single file on its way."""
logging.info('Uploading %s %s (%s bytes, type=%s) to %s.',
self.what, path, len(payload), mime_type, self.single_url)
self.server.Send(self.single_url,
payload=payload,
content_type=mime_type,
path=path,
app_id=self.app_id,
version=self.version)
def Flush(self):
"""Flush the current batch.
This first attempts to send the batch as a single request; if that
fails because the server doesn't support batching, the files are
sent one by one, and self.batching is reset to False.
At the end, self.batch and self.batch_size are reset.
"""
if not self.batch:
return
try:
self.SendBatch()
except urllib2.HTTPError, err:
if err.code != 404:
raise
logging.info('Old server detected; turning off %s batching.', self.what)
self.batching = False
for path, payload, mime_type in self.batch:
self.SendSingleFile(path, payload, mime_type)
self.batch = []
self.batch_size = 0
def AddToBatch(self, path, payload, mime_type):
"""Batch a file, possibly flushing first, or perhaps upload it directly.
Args:
path: The name of the file.
payload: The contents of the file.
mime_type: The MIME Content-type of the file, or None.
If mime_type is None, application/octet-stream is substituted.
"""
if not mime_type:
mime_type = 'application/octet-stream'
size = len(payload)
if size <= MAX_BATCH_FILE_SIZE:
if (len(self.batch) >= MAX_BATCH_COUNT or
self.batch_size + size > MAX_BATCH_SIZE):
self.Flush()
if self.batching:
logging.info('Adding %s %s (%s bytes, type=%s) to batch.',
self.what, path, size, mime_type)
self.batch.append((path, payload, mime_type))
self.batch_size += size + BATCH_OVERHEAD
return
self.SendSingleFile(path, payload, mime_type)
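# Sizing note (illustrative, derived from the module-level limits above): a
# batch is flushed once it holds MAX_BATCH_COUNT (100) files or would exceed
# MAX_BATCH_SIZE (~1MB, counting BATCH_OVERHEAD bytes per file), and any file
# larger than MAX_BATCH_FILE_SIZE (200000 bytes) skips batching and is sent
# via SendSingleFile instead.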
def _Hash(content):
"""Compute the hash of the content.
Args:
content: The data to hash as a string.
Returns:
The string representation of the hash.
"""
h = sha.new(content).hexdigest()
return '%s_%s_%s_%s_%s' % (h[0:8], h[8:16], h[16:24], h[24:32], h[32:40])
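# Illustrative only: _Hash('') groups the 40 hex digits of the SHA-1 digest
# into five underscore-separated chunks of eight:
#   'da39a3ee_5e6b4b0d_3255bfef_95601890_afd80709'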
class AppVersionUpload(object):
"""Provides facilities to upload a new appversion to the hosting service.
Attributes:
server: The AbstractRpcServer to use for the upload.
config: The AppInfoExternal object derived from the app.yaml file.
app_id: The application string from 'config'.
version: The version string from 'config'.
files: A dictionary of files to upload to the server, mapping path to
hash of the file contents.
in_transaction: True iff a transaction with the server has started.
An AppVersionUpload can do only one transaction at a time.
deployed: True iff the Deploy method has been called.
"""
def __init__(self, server, config):
"""Creates a new AppVersionUpload.
Args:
server: The RPC server to use. Should be an instance of HttpRpcServer or
TestRpcServer.
config: An AppInfoExternal object that specifies the configuration for
this application.
"""
self.server = server
self.config = config
self.app_id = self.config.application
self.version = self.config.version
self.files = {}
self.in_transaction = False
self.deployed = False
self.batching = True
self.file_batcher = UploadBatcher('file', self.app_id, self.version,
self.server)
self.blob_batcher = UploadBatcher('blob', self.app_id, self.version,
self.server)
self.errorblob_batcher = UploadBatcher('errorblob', self.app_id,
self.version, self.server)
def AddFile(self, path, file_handle):
"""Adds the provided file to the list to be pushed to the server.
Args:
path: The path the file should be uploaded as.
file_handle: A stream containing data to upload.
"""
assert not self.in_transaction, 'Already in a transaction.'
assert file_handle is not None
reason = appinfo.ValidFilename(path)
if reason:
logging.error(reason)
return
pos = file_handle.tell()
content_hash = _Hash(file_handle.read())
file_handle.seek(pos, 0)
self.files[path] = content_hash
def Begin(self):
"""Begins the transaction, returning a list of files that need uploading.
All calls to AddFile must be made before calling Begin().
Returns:
A list of pathnames for files that should be uploaded using UploadFile()
before Commit() can be called.
"""
assert not self.in_transaction, 'Already in a transaction.'
StatusUpdate('Initiating update.')
self.server.Send('/api/appversion/create', app_id=self.app_id,
version=self.version, payload=self.config.ToYAML())
self.in_transaction = True
files_to_clone = []
blobs_to_clone = []
errorblobs = {}
for path, content_hash in self.files.iteritems():
match_found = False
mime_type = GetMimeTypeIfStaticFile(self.config, path)
if mime_type is not None:
blobs_to_clone.append((path, content_hash, mime_type))
match_found = True
(mime_type, error_code) = LookupErrorBlob(self.config, path)
if mime_type is not None:
errorblobs[path] = content_hash
match_found = True
if not match_found:
files_to_clone.append((path, content_hash))
files_to_upload = {}
def CloneFiles(url, files, file_type):
"""Sends files to the given url.
Args:
url: the server URL to use.
files: a list of files
file_type: the type of the files
"""
if not files:
return
StatusUpdate('Cloning %d %s file%s.' %
(len(files), file_type, len(files) != 1 and 's' or ''))
for i in xrange(0, len(files), MAX_FILES_TO_CLONE):
if i > 0 and i % MAX_FILES_TO_CLONE == 0:
StatusUpdate('Cloned %d files.' % i)
chunk = files[i:min(len(files), i + MAX_FILES_TO_CLONE)]
result = self.server.Send(url,
app_id=self.app_id, version=self.version,
payload=BuildClonePostBody(chunk))
if result:
files_to_upload.update(dict(
(f, self.files[f]) for f in result.split(LIST_DELIMITER)))
CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static')
CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')
logging.debug('Files to upload: %s', files_to_upload)
for (path, content_hash) in errorblobs.iteritems():
files_to_upload[path] = content_hash
self.files = files_to_upload
return sorted(files_to_upload.iterkeys())
def UploadFile(self, path, file_handle):
"""Uploads a file to the hosting service.
Must only be called after Begin().
The path provided must be one of those that were returned by Begin().
Args:
path: The path the file is being uploaded as.
file_handle: A file-like object containing the data to upload.
Raises:
KeyError: The provided file is not amongst those to be uploaded.
"""
assert self.in_transaction, 'Begin() must be called before UploadFile().'
if path not in self.files:
raise KeyError('File \'%s\' is not in the list of files to be uploaded.'
% path)
del self.files[path]
match_found = False
mime_type = GetMimeTypeIfStaticFile(self.config, path)
payload = file_handle.read()
if mime_type is not None:
self.blob_batcher.AddToBatch(path, payload, mime_type)
match_found = True
(mime_type, error_code) = LookupErrorBlob(self.config, path)
if mime_type is not None:
self.errorblob_batcher.AddToBatch(error_code, payload, mime_type)
match_found = True
if not match_found:
self.file_batcher.AddToBatch(path, payload, None)
def Precompile(self):
"""Handle bytecode precompilation."""
StatusUpdate('Precompilation starting.')
files = []
while True:
if files:
StatusUpdate('Precompilation: %d files left.' % len(files))
files = self.PrecompileBatch(files)
if not files:
break
StatusUpdate('Precompilation completed.')
def PrecompileBatch(self, files):
"""Precompile a batch of files.
Args:
files: Either an empty list (for the initial request) or a list
of files to be precompiled.
Returns:
Either an empty list (if no more files need to be precompiled)
or a list of files to be precompiled subsequently.
"""
payload = LIST_DELIMITER.join(files)
response = self.server.Send('/api/appversion/precompile',
app_id=self.app_id,
version=self.version,
payload=payload)
if not response:
return []
return response.split(LIST_DELIMITER)
def Commit(self):
"""Commits the transaction, making the new app version available.
All the files returned by Begin() must have been uploaded with UploadFile()
before Commit() can be called.
This tries the new 'deploy' method; if that fails it uses the old 'commit'.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Commit().'
if self.files:
raise Exception('Not all required files have been uploaded.')
try:
self.Deploy()
if not RetryWithBackoff(1, 2, 60, 20, self.IsReady):
logging.warning('Version still not ready to serve, aborting.')
raise Exception('Version not ready.')
self.StartServing()
except urllib2.HTTPError, e:
if e.code != 404:
raise
StatusUpdate('Closing update.')
self.server.Send('/api/appversion/commit', app_id=self.app_id,
version=self.version)
self.in_transaction = False
def Deploy(self):
"""Deploys the new app version but does not make it default.
All the files returned by Begin() must have been uploaded with UploadFile()
before Deploy() can be called.
Raises:
Exception: Some required files were not uploaded.
"""
assert self.in_transaction, 'Begin() must be called before Deploy().'
if self.files:
raise Exception('Not all required files have been uploaded.')
StatusUpdate('Deploying new version.')
self.server.Send('/api/appversion/deploy', app_id=self.app_id,
version=self.version)
self.deployed = True
def IsReady(self):
"""Check if the new app version is ready to serve traffic.
Raises:
Exception: Deploy has not yet been called.
Returns:
True if the server returned the app is ready to serve.
"""
assert self.deployed, 'Deploy() must be called before IsReady().'
StatusUpdate('Checking if new version is ready to serve.')
result = self.server.Send('/api/appversion/isready', app_id=self.app_id,
version=self.version)
return result == '1'
def StartServing(self):
"""Start serving with the newly created version.
Raises:
Exception: Deploy has not yet been called.
"""
    assert self.deployed, 'Deploy() must be called before StartServing().'
StatusUpdate('Closing update: new version is ready to start serving.')
self.server.Send('/api/appversion/startserving',
app_id=self.app_id, version=self.version)
self.in_transaction = False
def Rollback(self):
"""Rolls back the transaction if one is in progress."""
if not self.in_transaction:
return
StatusUpdate('Rolling back the update.')
self.server.Send('/api/appversion/rollback', app_id=self.app_id,
version=self.version)
self.in_transaction = False
self.files = {}
def DoUpload(self, paths, max_size, openfunc):
"""Uploads a new appversion with the given config and files to the server.
Args:
paths: An iterator that yields the relative paths of the files to upload.
max_size: The maximum size file to upload.
openfunc: A function that takes a path and returns a file-like object.
"""
logging.info('Reading app configuration.')
path = ''
try:
StatusUpdate('Scanning files on local disk.')
num_files = 0
for path in paths:
file_handle = openfunc(path)
try:
file_length = GetFileLength(file_handle)
if file_length > max_size:
logging.error('Ignoring file \'%s\': Too long '
'(max %d bytes, file is %d bytes)',
path, max_size, file_length)
else:
logging.info('Processing file \'%s\'', path)
self.AddFile(path, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
StatusUpdate('Scanned %d files.' % num_files)
except KeyboardInterrupt:
logging.info('User interrupted. Aborting.')
raise
except EnvironmentError, e:
logging.error('An error occurred processing file \'%s\': %s. Aborting.',
path, e)
raise
try:
missing_files = self.Begin()
if missing_files:
StatusUpdate('Uploading %d files and blobs.' % len(missing_files))
num_files = 0
for missing_file in missing_files:
file_handle = openfunc(missing_file)
try:
self.UploadFile(missing_file, file_handle)
finally:
file_handle.close()
num_files += 1
if num_files % 500 == 0:
            StatusUpdate('Processed %d out of %d.' %
(num_files, len(missing_files)))
self.file_batcher.Flush()
self.blob_batcher.Flush()
self.errorblob_batcher.Flush()
        StatusUpdate('Uploaded %d files and blobs.' % num_files)
if (self.config.derived_file_type and
appinfo.PYTHON_PRECOMPILED in self.config.derived_file_type):
self.Precompile()
self.Commit()
except KeyboardInterrupt:
logging.info('User interrupted. Aborting.')
self.Rollback()
raise
except urllib2.HTTPError, err:
logging.info('HTTP Error (%s)', err)
self.Rollback()
raise
except:
logging.exception('An unexpected error occurred. Aborting.')
self.Rollback()
raise
logging.info('Done!')
def FileIterator(base, skip_files, separator=os.path.sep):
"""Walks a directory tree, returning all the files. Follows symlinks.
Args:
base: The base path to search for files under.
skip_files: A regular expression object for files/directories to skip.
separator: Path separator used by the running system's platform.
Yields:
Paths of files found, relative to base.
"""
dirs = ['']
while dirs:
current_dir = dirs.pop()
for entry in os.listdir(os.path.join(base, current_dir)):
name = os.path.join(current_dir, entry)
fullname = os.path.join(base, name)
if separator == '\\':
name = name.replace('\\', '/')
if os.path.isfile(fullname):
if skip_files.match(name):
logging.info('Ignoring file \'%s\': File matches ignore regex.', name)
else:
yield name
elif os.path.isdir(fullname):
if skip_files.match(name):
logging.info(
'Ignoring directory \'%s\': Directory matches ignore regex.',
name)
else:
dirs.append(name)
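# A minimal usage sketch (hypothetical helper, not part of the tool): collect
# all uploadable paths under an app directory, skipping compiled Python files.
def _ExampleListAppFiles(basepath):
  """Illustrative only; returns relative paths yielded by FileIterator."""
  skip = re.compile(r'.*\.pyc$')
  return list(FileIterator(basepath, skip))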
def GetFileLength(fh):
"""Returns the length of the file represented by fh.
This function is capable of finding the length of any seekable stream,
unlike os.fstat, which only works on file streams.
Args:
fh: The stream to get the length of.
Returns:
The length of the stream.
"""
pos = fh.tell()
fh.seek(0, 2)
length = fh.tell()
fh.seek(pos, 0)
return length
def GetUserAgent(get_version=GetVersionObject,
get_platform=appengine_rpc.GetPlatformToken):
"""Determines the value of the 'User-agent' header to use for HTTP requests.
If the 'APPCFG_SDK_NAME' environment variable is present, that will be
used as the first product token in the user-agent.
Args:
get_version: Used for testing.
get_platform: Used for testing.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., 'appcfg_py/1.0.1 Darwin/9.2.0 Python/2.5.2'.
"""
product_tokens = []
sdk_name = os.environ.get('APPCFG_SDK_NAME')
if sdk_name:
product_tokens.append(sdk_name)
else:
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
product_tokens.append('appcfg_py/%s' % release)
product_tokens.append(get_platform())
python_version = '.'.join(str(i) for i in sys.version_info)
product_tokens.append('Python/%s' % python_version)
return ' '.join(product_tokens)
def GetSourceName(get_version=GetVersionObject):
"""Gets the name of this source version."""
version = get_version()
if version is None:
release = 'unknown'
else:
release = version['release']
return 'Google-appcfg-%s' % (release,)
class AppCfgApp(object):
"""Singleton class to wrap AppCfg tool functionality.
This class is responsible for parsing the command line and executing
the desired action on behalf of the user. Processing files and
communicating with the server is handled by other classes.
Attributes:
actions: A dictionary mapping action names to Action objects.
action: The Action specified on the command line.
parser: An instance of optparse.OptionParser.
options: The command line options parsed by 'parser'.
argv: The original command line as a list.
args: The positional command line args left over after parsing the options.
raw_input_fn: Function used for getting raw user input, like email.
password_input_fn: Function used for getting user password.
error_fh: Unexpected HTTPErrors are printed to this file handle.
Attributes for testing:
parser_class: The class to use for parsing the command line. Because
      OptionParser will exit the program when there is a parse failure, it
      is nice to subclass OptionParser and catch the error before exiting.
"""
def __init__(self, argv, parser_class=optparse.OptionParser,
rpc_server_class=appengine_rpc.HttpRpcServer,
raw_input_fn=raw_input,
password_input_fn=getpass.getpass,
error_fh=sys.stderr,
update_check_class=UpdateCheck):
"""Initializer. Parses the cmdline and selects the Action to use.
Initializes all of the attributes described in the class docstring.
Prints help or error messages if there is an error parsing the cmdline.
Args:
argv: The list of arguments passed to this program.
parser_class: Options parser to use for this application.
rpc_server_class: RPC server class to use for this application.
raw_input_fn: Function used for getting user email.
password_input_fn: Function used for getting user password.
error_fh: Unexpected HTTPErrors are printed to this file handle.
update_check_class: UpdateCheck class (can be replaced for testing).
"""
self.parser_class = parser_class
self.argv = argv
self.rpc_server_class = rpc_server_class
self.raw_input_fn = raw_input_fn
self.password_input_fn = password_input_fn
self.error_fh = error_fh
self.update_check_class = update_check_class
self.parser = self._GetOptionParser()
for action in self.actions.itervalues():
action.options(self, self.parser)
self.options, self.args = self.parser.parse_args(argv[1:])
if len(self.args) < 1:
self._PrintHelpAndExit()
if self.args[0] not in self.actions:
self.parser.error('Unknown action \'%s\'\n%s' %
(self.args[0], self.parser.get_description()))
action_name = self.args.pop(0)
self.action = self.actions[action_name]
self.parser, self.options = self._MakeSpecificParser(self.action)
if self.options.help:
self._PrintHelpAndExit()
if self.options.verbose == 2:
logging.getLogger().setLevel(logging.INFO)
elif self.options.verbose == 3:
logging.getLogger().setLevel(logging.DEBUG)
global verbosity
verbosity = self.options.verbose
def Run(self):
"""Executes the requested action.
Catches any HTTPErrors raised by the action and prints them to stderr.
Returns:
1 on error, 0 if successful.
"""
try:
self.action(self)
except urllib2.HTTPError, e:
body = e.read()
print >>self.error_fh, ('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, body.rstrip('\n')))
return 1
except yaml_errors.EventListenerError, e:
print >>self.error_fh, ('Error parsing yaml file:\n%s' % e)
return 1
return 0
def _GetActionDescriptions(self):
"""Returns a formatted string containing the short_descs for all actions."""
action_names = self.actions.keys()
action_names.sort()
desc = ''
for action_name in action_names:
desc += ' %s: %s\n' % (action_name, self.actions[action_name].short_desc)
return desc
def _GetOptionParser(self):
"""Creates an OptionParser with generic usage and description strings.
Returns:
An OptionParser instance.
"""
class Formatter(optparse.IndentedHelpFormatter):
"""Custom help formatter that does not reformat the description."""
def format_description(self, description):
"""Very simple formatter."""
return description + '\n'
desc = self._GetActionDescriptions()
desc = ('Action must be one of:\n%s'
'Use \'help <action>\' for a detailed description.') % desc
parser = self.parser_class(usage='%prog [options] <action>',
description=desc,
formatter=Formatter(),
conflict_handler='resolve')
parser.add_option('-h', '--help', action='store_true',
dest='help', help='Show the help message and exit.')
parser.add_option('-q', '--quiet', action='store_const', const=0,
dest='verbose', help='Print errors only.')
parser.add_option('-v', '--verbose', action='store_const', const=2,
dest='verbose', default=1,
help='Print info level logs.')
parser.add_option('--noisy', action='store_const', const=3,
dest='verbose', help='Print all logs.')
parser.add_option('-s', '--server', action='store', dest='server',
default='appengine.google.com',
metavar='SERVER', help='The server to connect to.')
parser.add_option('--secure', action='store_true', dest='secure',
default=True, help=optparse.SUPPRESS_HELP)
parser.add_option('--insecure', action='store_false', dest='secure',
help='Use HTTP when communicating with the server.')
parser.add_option('-e', '--email', action='store', dest='email',
metavar='EMAIL', default=None,
help='The username to use. Will prompt if omitted.')
parser.add_option('-H', '--host', action='store', dest='host',
metavar='HOST', default=None,
help='Overrides the Host header sent with all RPCs.')
parser.add_option('--no_cookies', action='store_false',
dest='save_cookies', default=True,
help='Do not save authentication cookies to local disk.')
parser.add_option('--passin', action='store_true',
dest='passin', default=False,
help='Read the login password from stdin.')
parser.add_option('-A', '--application', action='store', dest='app_id',
help='Override application from app.yaml file.')
parser.add_option('-V', '--version', action='store', dest='version',
help='Override (major) version from app.yaml file.')
return parser
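  # Illustrative invocations given the options above ('update' is one of the
  # registered actions; paths are hypothetical):
  #   appcfg.py update myapp/
  #   appcfg.py -A my-app -V 2 --insecure update myapp/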
def _MakeSpecificParser(self, action):
"""Creates a new parser with documentation specific to 'action'.
Args:
action: An Action instance to be used when initializing the new parser.
Returns:
A tuple containing:
parser: An instance of OptionsParser customized to 'action'.
options: The command line options after re-parsing.
"""
parser = self._GetOptionParser()
parser.set_usage(action.usage)
parser.set_description('%s\n%s' % (action.short_desc, action.long_desc))
action.options(self, parser)
options, unused_args = parser.parse_args(self.argv[1:])
return parser, options
def _PrintHelpAndExit(self, exit_code=2):
"""Prints the parser's help message and exits the program.
Args:
exit_code: The integer code to pass to sys.exit().
"""
self.parser.print_help()
sys.exit(exit_code)
def _GetRpcServer(self):
"""Returns an instance of an AbstractRpcServer.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
def GetUserCredentials():
"""Prompts the user for a username and password."""
email = self.options.email
if email is None:
email = self.raw_input_fn('Email: ')
password_prompt = 'Password for %s: ' % email
if self.options.passin:
password = self.raw_input_fn(password_prompt)
else:
password = self.password_input_fn(password_prompt)
return (email, password)
StatusUpdate('Server: %s.' % self.options.server)
if self.options.host and self.options.host == 'localhost':
email = self.options.email
if email is None:
email = 'test@example.com'
logging.info('Using debug user %s. Override with --email', email)
server = self.rpc_server_class(
self.options.server,
lambda: (email, 'password'),
GetUserAgent(),
GetSourceName(),
host_override=self.options.host,
save_cookies=self.options.save_cookies,
secure=False)
server.authenticated = True
return server
if self.options.passin:
auth_tries = 1
else:
auth_tries = 3
return self.rpc_server_class(self.options.server, GetUserCredentials,
GetUserAgent(), GetSourceName(),
host_override=self.options.host,
save_cookies=self.options.save_cookies,
auth_tries=auth_tries,
account_type='HOSTED_OR_GOOGLE',
secure=self.options.secure)
def _FindYaml(self, basepath, file_name):
"""Find yaml files in application directory.
Args:
basepath: Base application directory.
file_name: Filename without extension to search for.
Returns:
Path to located yaml file if one exists, else None.
"""
if not os.path.isdir(basepath):
self.parser.error('Not a directory: %s' % basepath)
for yaml_file in (file_name + '.yaml', file_name + '.yml'):
yaml_path = os.path.join(basepath, yaml_file)
if os.path.isfile(yaml_path):
return yaml_path
return None
def _ParseAppYaml(self, basepath):
"""Parses the app.yaml file.
Args:
basepath: the directory of the application.
Returns:
An AppInfoExternal object.
"""
appyaml_filename = self._FindYaml(basepath, 'app')
if appyaml_filename is None:
self.parser.error('Directory does not contain an app.yaml '
'configuration file.')
fh = open(appyaml_filename, 'r')
try:
appyaml = appinfo.LoadSingleAppInfo(fh)
finally:
fh.close()
orig_application = appyaml.application
orig_version = appyaml.version
if self.options.app_id:
appyaml.application = self.options.app_id
if self.options.version:
appyaml.version = self.options.version
msg = 'Application: %s' % appyaml.application
if appyaml.application != orig_application:
msg += ' (was: %s)' % orig_application
msg += '; version: %s' % appyaml.version
if appyaml.version != orig_version:
msg += ' (was: %s)' % orig_version
msg += '.'
StatusUpdate(msg)
return appyaml
def _ParseYamlFile(self, basepath, basename, parser):
"""Parses the a yaml file.
Args:
basepath: the directory of the application.
basename: the base name of the file (with the '.yaml' stripped off).
parser: the function or method used to parse the file.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
file_name = self._FindYaml(basepath, basename)
if file_name is not None:
fh = open(file_name, 'r')
try:
defns = parser(fh)
finally:
fh.close()
return defns
return None
def _ParseIndexYaml(self, basepath):
"""Parses the index.yaml file.
Args:
basepath: the directory of the application.
Returns:
A single parsed yaml file or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'index',
datastore_index.ParseIndexDefinitions)
def _ParseCronYaml(self, basepath):
"""Parses the cron.yaml file.
Args:
basepath: the directory of the application.
Returns:
A CronInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'cron', croninfo.LoadSingleCron)
def _ParseQueueYaml(self, basepath):
"""Parses the queue.yaml file.
Args:
basepath: the directory of the application.
Returns:
A QueueInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'queue', queueinfo.LoadSingleQueue)
def _ParseDosYaml(self, basepath):
"""Parses the dos.yaml file.
Args:
basepath: the directory of the application.
Returns:
A DosInfoExternal object or None if the file does not exist.
"""
return self._ParseYamlFile(basepath, 'dos', dosinfo.LoadSingleDos)
def Help(self):
"""Prints help for a specific action.
Expects self.args[0] to contain the name of the action in question.
Exits the program after printing the help message.
"""
if len(self.args) != 1 or self.args[0] not in self.actions:
self.parser.error('Expected a single action argument. Must be one of:\n' +
self._GetActionDescriptions())
action = self.actions[self.args[0]]
self.parser, unused_options = self._MakeSpecificParser(action)
self._PrintHelpAndExit(exit_code=0)
def Update(self):
"""Updates and deploys a new appversion."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
updatecheck = self.update_check_class(rpc_server, appyaml)
updatecheck.CheckForUpdates()
appversion = AppVersionUpload(rpc_server, appyaml)
appversion.DoUpload(FileIterator(basepath, appyaml.skip_files),
self.options.max_size,
lambda path: open(os.path.join(basepath, path), 'rb'))
index_defs = self._ParseIndexYaml(basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpc_server, appyaml, index_defs)
try:
index_upload.DoUpload()
except urllib2.HTTPError, e:
StatusUpdate('Error %d: --- begin server output ---\n'
'%s\n--- end server output ---' %
(e.code, e.read().rstrip('\n')))
print >> self.error_fh, (
'Your app was updated, but there was an error updating your '
'indexes. Please retry later with appcfg.py update_indexes.')
cron_entries = self._ParseCronYaml(basepath)
if cron_entries:
cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
cron_upload.DoUpload()
queue_entries = self._ParseQueueYaml(basepath)
if queue_entries:
queue_upload = QueueEntryUpload(rpc_server, appyaml, queue_entries)
queue_upload.DoUpload()
dos_entries = self._ParseDosYaml(basepath)
if dos_entries:
dos_upload = DosEntryUpload(rpc_server, appyaml, dos_entries)
dos_upload.DoUpload()
def _UpdateOptions(self, parser):
"""Adds update-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-S', '--max_size', type='int', dest='max_size',
default=10485760, metavar='SIZE',
help='Maximum size of a file to upload.')
def VacuumIndexes(self):
"""Deletes unused indexes."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
config = self._ParseAppYaml(basepath)
index_defs = self._ParseIndexYaml(basepath)
if index_defs is None:
index_defs = datastore_index.IndexDefinitions()
rpc_server = self._GetRpcServer()
vacuum = VacuumIndexesOperation(rpc_server,
config,
self.options.force_delete)
vacuum.DoVacuum(index_defs)
def _VacuumIndexesOptions(self, parser):
"""Adds vacuum_indexes-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-f', '--force', action='store_true', dest='force_delete',
default=False,
help='Force deletion without being prompted.')
def UpdateCron(self):
"""Updates any new or changed cron definitions."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
cron_entries = self._ParseCronYaml(basepath)
if cron_entries:
cron_upload = CronEntryUpload(rpc_server, appyaml, cron_entries)
cron_upload.DoUpload()
def UpdateIndexes(self):
"""Updates indexes."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
index_defs = self._ParseIndexYaml(basepath)
if index_defs:
index_upload = IndexDefinitionUpload(rpc_server, appyaml, index_defs)
index_upload.DoUpload()
def UpdateQueues(self):
"""Updates any new or changed task queue definitions."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
queue_entries = self._ParseQueueYaml(basepath)
if queue_entries:
queue_upload = QueueEntryUpload(rpc_server, appyaml, queue_entries)
queue_upload.DoUpload()
def UpdateDos(self):
"""Updates any new or changed dos definitions."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
dos_entries = self._ParseDosYaml(basepath)
if dos_entries:
dos_upload = DosEntryUpload(rpc_server, appyaml, dos_entries)
dos_upload.DoUpload()
def Rollback(self):
"""Does a rollback of any existing transaction for this app version."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
appversion = AppVersionUpload(self._GetRpcServer(), appyaml)
appversion.in_transaction = True
appversion.Rollback()
def SetDefaultVersion(self):
"""Sets the default version."""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
version_setter = DefaultVersionSet(self._GetRpcServer(), appyaml)
version_setter.SetVersion()
def RequestLogs(self):
"""Write request logs to a file."""
if len(self.args) != 2:
self.parser.error(
'Expected a <directory> argument and an <output_file> argument.')
if (self.options.severity is not None and
not 0 <= self.options.severity <= MAX_LOG_LEVEL):
self.parser.error(
'Severity range is 0 (DEBUG) through %s (CRITICAL).' % MAX_LOG_LEVEL)
if self.options.num_days is None:
self.options.num_days = int(not self.options.append)
try:
end_date = self._ParseEndDate(self.options.end_date)
except (TypeError, ValueError):
self.parser.error('End date must be in the format YYYY-MM-DD.')
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
rpc_server = self._GetRpcServer()
logs_requester = LogsRequester(rpc_server, appyaml, self.args[1],
self.options.num_days,
self.options.append,
self.options.severity,
end_date,
self.options.vhost,
self.options.include_vhost,
self.options.include_all)
logs_requester.DownloadLogs()
def _ParseEndDate(self, date, time_func=time.time):
"""Translates an ISO 8601 date to a date object.
Args:
date: A date string as YYYY-MM-DD.
time_func: time.time() function for testing.
Returns:
A date object representing the last day of logs to get.
If no date is given, returns today in the US/Pacific timezone.
"""
if not date:
return PacificDate(time_func())
return datetime.date(*[int(i) for i in date.split('-')])
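# A minimal sketch of the expression above: the date string is split on '-'
# and the parts are passed positionally, so
#   datetime.date(*[int(i) for i in '2010-12-31'.split('-')])
# evaluates to datetime.date(2010, 12, 31). Malformed input raises TypeError
# or ValueError, which RequestLogs turns into a parser error.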
def _RequestLogsOptions(self, parser):
"""Adds request_logs-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_days', type='int', dest='num_days',
action='store', default=None,
help='Number of days worth of log data to get. '
'The cut-off point is midnight US/Pacific. '
'Use 0 to get all available logs. '
'Default is 1, unless --append is also given; '
'then the default is 0.')
parser.add_option('-a', '--append', dest='append',
action='store_true', default=False,
help='Append to existing file.')
parser.add_option('--severity', type='int', dest='severity',
action='store', default=None,
help='Severity of app-level log messages to get. '
'The range is 0 (DEBUG) through 4 (CRITICAL). '
'If omitted, only request logs are returned.')
parser.add_option('--vhost', type='string', dest='vhost',
action='store', default=None,
help='The virtual host of log messages to get. '
'If omitted, all log messages are returned.')
parser.add_option('--include_vhost', dest='include_vhost',
action='store_true', default=False,
help='Include virtual host in log messages.')
parser.add_option('--include_all', dest='include_all',
action='store_true', default=None,
help='Include everything in log messages.')
parser.add_option('--end_date', dest='end_date',
action='store', default='',
help='End date (as YYYY-MM-DD) of period for log data. '
'Defaults to today.')
def CronInfo(self, now=None, output=sys.stdout):
"""Displays information about cron definitions.
Args:
now: Time to compute the next runs from; defaults to the current time. Used for testing.
output: Stream to print to; defaults to sys.stdout. Used for testing.
"""
if len(self.args) != 1:
self.parser.error('Expected a single <directory> argument.')
if now is None:
now = datetime.datetime.now()
basepath = self.args[0]
cron_entries = self._ParseCronYaml(basepath)
if cron_entries and cron_entries.cron:
for entry in cron_entries.cron:
description = entry.description
if not description:
description = '<no description>'
print >>output, '\n%s:\nURL: %s\nSchedule: %s' % (description,
entry.url,
entry.schedule)
schedule = groctimespecification.GrocTimeSpecification(entry.schedule)
matches = schedule.GetMatches(now, self.options.num_runs)
for match in matches:
print >>output, '%s, %s from now' % (
match.strftime('%Y-%m-%d %H:%M:%S'), match - now)
def _CronInfoOptions(self, parser):
"""Adds cron_info-specific options to 'parser'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('-n', '--num_runs', type='int', dest='num_runs',
action='store', default=5,
help='Number of runs of each cron job to display. '
'Default is 5.')
def _CheckRequiredLoadOptions(self):
"""Checks that upload/download options are present."""
for option in ['filename',]:
if getattr(self.options, option) is None:
self.parser.error('Option \'%s\' is required.' % option)
if not self.options.url:
self.parser.error('You must have google.appengine.ext.remote_api.handler '
'assigned to an endpoint in app.yaml, or provide '
'the url of the handler via the \'url\' option.')
def InferRemoteApiUrl(self, appyaml):
"""Uses app.yaml to determine the remote_api endpoint.
Args:
appyaml: A parsed app.yaml file.
Returns:
The URL of the remote_api endpoint as a string, or None if not found.
"""
handlers = appyaml.handlers
handler_suffix = 'remote_api/handler.py'
app_id = appyaml.application
for handler in handlers:
if hasattr(handler, 'script') and handler.script:
if handler.script.endswith(handler_suffix):
server = self.options.server
if server == 'appengine.google.com':
return 'http://%s.appspot.com%s' % (app_id, handler.url)
else:
return 'http://%s%s' % (server, handler.url)
return None
def RunBulkloader(self, arg_dict):
"""Invokes the bulkloader with the given keyword arguments.
Args:
arg_dict: Dictionary of arguments to pass to bulkloader.Run().
"""
try:
import sqlite3
except ImportError:
logging.error('upload_data action requires SQLite3 and the python '
'sqlite3 module (included in python since 2.5).')
sys.exit(1)
sys.exit(bulkloader.Run(arg_dict))
def _SetupLoad(self):
"""Performs common verification and set up for upload and download."""
if len(self.args) != 1 and not self.options.url:
self.parser.error('Expected either --url or a single <directory> '
'argument.')
if len(self.args) == 1:
basepath = self.args[0]
appyaml = self._ParseAppYaml(basepath)
self.options.app_id = appyaml.application
if not self.options.url:
url = self.InferRemoteApiUrl(appyaml)
if url is not None:
self.options.url = url
self._CheckRequiredLoadOptions()
if self.options.batch_size < 1:
self.parser.error('batch_size must be 1 or larger.')
if verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
self.options.debug = False
else:
logging.getLogger().setLevel(logging.DEBUG)
self.options.debug = True
def _MakeLoaderArgs(self):
args = dict([(arg_name, getattr(self.options, arg_name, None)) for
arg_name in (
'url',
'filename',
'batch_size',
'kind',
'num_threads',
'bandwidth_limit',
'rps_limit',
'http_limit',
'db_filename',
'config_file',
'auth_domain',
'has_header',
'loader_opts',
'log_file',
'passin',
'email',
'debug',
'exporter_opts',
'mapper_opts',
'result_db_filename',
'dry_run',
'dump',
'restore',
'namespace',
'create_config',
)])
args['application'] = self.options.app_id
return args
def PerformDownload(self, run_fn=None):
"""Performs a datastore download via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Downloading data records.')
args = self._MakeLoaderArgs()
args['download'] = bool(args['config_file'])
args['has_header'] = False
args['map'] = False
args['dump'] = not args['config_file']
args['restore'] = False
args['create_config'] = False
run_fn(args)
def PerformUpload(self, run_fn=None):
"""Performs a datastore upload via the bulkloader.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Uploading data records.')
args = self._MakeLoaderArgs()
args['download'] = False
args['map'] = False
args['dump'] = False
args['restore'] = not args['config_file']
args['create_config'] = False
run_fn(args)
def CreateBulkloadConfig(self, run_fn=None):
"""Create a bulkloader config via the bulkloader wizard.
Args:
run_fn: Function to invoke the bulkloader, used for testing.
"""
if run_fn is None:
run_fn = self.RunBulkloader
self._SetupLoad()
StatusUpdate('Creating bulkloader configuration.')
args = self._MakeLoaderArgs()
args['download'] = False
args['has_header'] = False
args['map'] = False
args['dump'] = False
args['restore'] = False
args['create_config'] = True
run_fn(args)
def _PerformLoadOptions(self, parser):
"""Adds options common to 'upload_data' and 'download_data'.
Args:
parser: An instance of OptionsParser.
"""
parser.add_option('--filename', type='string', dest='filename',
action='store',
help='The name of the file containing the input data.'
' (Required)')
parser.add_option('--kind', type='string', dest='kind',
action='store',
help='The kind of the entities to store.')
parser.add_option('--url', type='string', dest='url',
action='store',
help='The location of the remote_api endpoint.')
parser.add_option('--num_threads', type='int', dest='num_threads',
action='store', default=10,
help='Number of threads to upload records with.')
parser.add_option('--batch_size', type='int', dest='batch_size',
action='store', default=10,
help='Number of records to post in each request.')
parser.add_option('--bandwidth_limit', type='int', dest='bandwidth_limit',
action='store', default=250000,
help='The maximum bytes/second bandwidth for transfers.')
parser.add_option('--rps_limit', type='int', dest='rps_limit',
action='store', default=20,
help='The maximum records/second for transfers.')
parser.add_option('--http_limit', type='int', dest='http_limit',
action='store', default=8,
help='The maximum requests/second for transfers.')
parser.add_option('--db_filename', type='string', dest='db_filename',
action='store',
help='Name of the progress database file.')
parser.add_option('--auth_domain', type='string', dest='auth_domain',
action='store', default='gmail.com',
help='The name of the authorization domain to use.')
parser.add_option('--log_file', type='string', dest='log_file',
help='File to write bulkloader logs. If not supplied '
'then a new log file will be created, named: '
'bulkloader-log-TIMESTAMP.')
parser.add_option('--dry_run', action='store_true',
dest='dry_run', default=False,
help='Do not execute any remote_api calls.')
parser.add_option('--namespace', type='string', dest='namespace',
action='store', default='',
help='Namespace to use when accessing datastore.')
def _PerformUploadOptions(self, parser):
"""Adds 'upload_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--has_header', dest='has_header',
action='store_true', default=False,
help='Whether the first line of the input file should be'
' skipped.')
parser.add_option('--loader_opts', type='string', dest='loader_opts',
help='A string to pass to the Loader.initialize method.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _PerformDownloadOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
parser.add_option('--exporter_opts', type='string', dest='exporter_opts',
help='A string to pass to the Exporter.initialize method.'
)
parser.add_option('--result_db_filename', type='string',
dest='result_db_filename',
action='store',
help='Database to write entities to for download.')
parser.add_option('--config_file', type='string', dest='config_file',
action='store',
help='Name of the configuration file.')
def _CreateBulkloadConfigOptions(self, parser):
"""Adds 'download_data' specific options to the 'parser' passed in.
Args:
parser: An instance of OptionsParser.
"""
self._PerformLoadOptions(parser)
class Action(object):
"""Contains information about a command line action.
Attributes:
function: The name of a function defined on AppCfg or its subclasses
that will perform the appropriate action.
usage: A command line usage string.
short_desc: A one-line description of the action.
long_desc: A detailed description of the action. Whitespace and
formatting will be preserved.
options: A function that will add extra options to a given OptionParser
object.
"""
def __init__(self, function, usage, short_desc, long_desc='',
options=lambda obj, parser: None):
"""Initializer for the class attributes."""
self.function = function
self.usage = usage
self.short_desc = short_desc
self.long_desc = long_desc
self.options = options
def __call__(self, appcfg):
"""Invoke this Action on the specified AppCfg.
This calls the function of the appropriate name on AppCfg, and
respects polymorphic overrides.
Args:
appcfg: The appcfg to use.
Returns:
The result of the function call.
"""
method = getattr(appcfg, self.function)
return method()
actions = {
'help': Action(
function='Help',
usage='%prog help <action>',
short_desc='Print help for a specific action.'),
'update': Action(
function='Update',
usage='%prog [options] update <directory>',
options=_UpdateOptions,
short_desc='Create or update an app version.',
long_desc="""
Specify a directory that contains all of the files required by
the app, and appcfg.py will create/update the app version referenced
in the app.yaml file at the top level of that directory. appcfg.py
will follow symlinks and recursively upload all files to the server.
Temporary or source control files (e.g. foo~, .svn/*) will be skipped."""),
'update_cron': Action(
function='UpdateCron',
usage='%prog [options] update_cron <directory>',
short_desc='Update application cron definitions.',
long_desc="""
The 'update_cron' command will update any new, removed or changed cron
definitions from the optional cron.yaml file."""),
'update_indexes': Action(
function='UpdateIndexes',
usage='%prog [options] update_indexes <directory>',
short_desc='Update application indexes.',
long_desc="""
The 'update_indexes' command will add additional indexes which are not currently
in production as well as restart any indexes that were not completed."""),
'update_queues': Action(
function='UpdateQueues',
usage='%prog [options] update_queues <directory>',
short_desc='Update application task queue definitions.',
long_desc="""
The 'update_queues' command will update any new, removed or changed task queue
definitions from the optional queue.yaml file."""),
'update_dos': Action(
function='UpdateDos',
usage='%prog [options] update_dos <directory>',
short_desc='Update application dos definitions.',
long_desc="""
The 'update_dos' command will update any new, removed or changed dos
definitions from the optional dos.yaml file."""),
'vacuum_indexes': Action(
function='VacuumIndexes',
usage='%prog [options] vacuum_indexes <directory>',
options=_VacuumIndexesOptions,
short_desc='Delete unused indexes from application.',
long_desc="""
The 'vacuum_indexes' command will help clean up indexes which are no longer
in use. It does this by comparing the local index configuration with
indexes that are actually defined on the server. If any indexes on the
server do not exist in the index configuration file, the user is given the
option to delete them."""),
'rollback': Action(
function='Rollback',
usage='%prog [options] rollback <directory>',
short_desc='Rollback an in-progress update.',
long_desc="""
The 'update' command requires a server-side transaction. Use 'rollback'
if you get an error message about another transaction being in progress
and you are sure that there is no such transaction."""),
'request_logs': Action(
function='RequestLogs',
usage='%prog [options] request_logs <directory> <output_file>',
options=_RequestLogsOptions,
short_desc='Write request logs in Apache common log format.',
long_desc="""
The 'request_logs' command exports the request logs from your application
to a file. It will write Apache common log format records ordered
chronologically. If the output file is '-', the logs are written to stdout."""),
'cron_info': Action(
function='CronInfo',
usage='%prog [options] cron_info <directory>',
options=_CronInfoOptions,
short_desc='Display information about cron jobs.',
long_desc="""
The 'cron_info' command will display the next 'number' runs (default 5) for
each cron job defined in the cron.yaml file."""),
'upload_data': Action(
function='PerformUpload',
usage='%prog [options] upload_data <directory>',
options=_PerformUploadOptions,
short_desc='Upload data records to datastore.',
long_desc="""
The 'upload_data' command translates input records into datastore entities and
uploads them into your application's datastore."""),
'download_data': Action(
function='PerformDownload',
usage='%prog [options] download_data <directory>',
options=_PerformDownloadOptions,
short_desc='Download entities from datastore.',
long_desc="""
The 'download_data' command downloads datastore entities and writes them to
a file in CSV or a developer-defined format."""),
'create_bulkloader_config': Action(
function='CreateBulkloadConfig',
usage='%prog [options] create_bulkload_config <directory>',
options=_CreateBulkloadConfigOptions,
short_desc='Create a bulkloader.yaml from a running application.',
long_desc="""
The 'create_bulkloader_config' command creates a bulkloader.yaml configuration
template for use with upload_data or download_data."""),
'set_default_version': Action(
function='SetDefaultVersion',
usage='%prog [options] set_default_version <directory>',
short_desc='Set the default (serving) version.',
long_desc="""
The 'set_default_version' command sets the default (serving) version of the app.
Defaults to using the version specified in app.yaml; use the --version flag to
override this."""),
}
def main(argv):
logging.basicConfig(format=('%(asctime)s %(levelname)s %(filename)s:'
'%(lineno)s %(message)s '))
try:
result = AppCfgApp(argv).Run()
if result:
sys.exit(result)
except KeyboardInterrupt:
StatusUpdate('Interrupted.')
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
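# A few illustrative invocations, using only actions and flags defined above
# ('myapp/' and 'logs.txt' are placeholder names):
#   appcfg.py --email=admin@example.com update myapp/
#   appcfg.py request_logs --num_days=0 myapp/ logs.txt
#   appcfg.py vacuum_indexes --force myapp/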
|
octavioturra/aritial
|
google_appengine/google/appengine/tools/appcfg.py
|
Python
|
apache-2.0
| 99,353
|
[
"VisIt"
] |
a20f62a2a485d8d8b7192fd998b806a0b43c834a25db22865e4d3b0bf2a5cc32
|
import numpy as np
import math
import sys
sys.path.insert(0,'../..')
import os
import classifier_eval_simplified
from sklearn import tree
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
for dim in range(2,11):
comp_file_list=[]
####################################################################
# Gaussian samples operation
####################################################################
for i in range(100):
comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{1}D_1000_0.6_0.2_0.1_{0}.txt".format(i,dim),os.environ['MLToolsDir']+"/Dalitz/gaussian_samples/gaussian_same_projection_on_each_axis/gauss_data/gaussian_same_projection_on_each_axis_redefined_{1}D_1000_0.6_0.2_0.1_1{0}.txt".format(str(i).zfill(2),dim)))
#for bdt originally we used learning_rate=0.01,n_estimators=983
#clf = tree.DecisionTreeClassifier('gini','best',37, 89, 1, 0.0, None)
clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2), learning_rate=0.0482,n_estimators=942)
#clf = SVC(C=params['aC'],gamma=params['agamma'],probability=True, cache_size=7000)
args=[str(dim)+ "Dgaussian_same_projection_redefined__0_1__0_1_noCPV_optimised_bdt","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),0]
#For nn:
#args=[str(dim)+"Dgauss_nn","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13),params['dimof_middle'],params['n_hidden_layers']]
####################################################################
classifier_eval_simplified.classifier_eval(0,0,args)
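#A minimal standalone sketch of the optimised classifier configured above;
#X_train, y_train and X_test are placeholders for data read from comp_file_list:
#clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(max_depth=2), learning_rate=0.0482, n_estimators=942)
#clf.fit(X_train, y_train)
#probabilities = clf.predict_proba(X_test)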
|
weissercn/MLTools
|
Dalitz_simplified/evaluation_of_optimised_classifiers/bdt_gaussian_same_projection/bdt_Gaussian_same_projection_evaluation_of_optimised_classifiers.py
|
Python
|
mit
| 1,785
|
[
"Gaussian"
] |
387511534cbb5a05fae5c274a05fc5ea3617d7f181a48b18a1c7393f42e40666
|
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Galaxy Authors. All Rights Reserved
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Author: wangtaize@baidu.com
# Date: 2015-03-30
import logging
from common import http
LOG = logging.getLogger("console")
def user_required(func):
def user_wrapper(request,*args,**kwds):
builder = http.ResponseBuilder()
user_id = request.GET.get("user",None) or request.POST.get("user",None)
if not user_id:
return builder.error("user id required").build_json()
request.user_id = user_id
return func(request,*args,**kwds)
return user_wrapper
def service_id_required(func):
def service_id_wrapper(request, *args, **kwds):
builder = http.ResponseBuilder()
service_id = request.GET.get("service",None) or request.POST.get("service",None)
if not service_id:
return builder.error("service is required").build_json()
try:
request.service_id = int(service_id)
except Exception as e:
LOG.exception("fail to convert service to int")
return builder.error("fail to convert service to int for %s"%e)
return func(request, *args, **kwds)
return service_id_wrapper
def service_name_required(func):
def service_name_wrapper(req, *args, **kwds):
builder = http.ResponseBuilder()
service_name = req.GET.get('serviceName',None) or req.POST.get('serviceName',None)
if not service_name:
return builder.error("serviceName is required").build_json()
req.service_name = service_name
return func(req, *args, **kwds)
return service_name_wrapper
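# A minimal usage sketch, assuming Django-style view functions ('status' and
# do_something are placeholders): the decorators stack, each attaching the
# validated value to the request before calling through.
# @user_required
# @service_id_required
# def status(request, *args, **kwds):
#     return do_something(request.user_id, request.service_id)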
|
fxsjy/galaxy
|
console/backend/src/console/service/decorator.py
|
Python
|
bsd-3-clause
| 1,734
|
[
"Galaxy"
] |
e1c7f0589c9f2b5c9435401f8cf79c1606b56f02ee3c735fb9cf9d904c8dd7ed
|
# -*- coding: utf-8 -*-
"""Jupyter Notebook Helpers"""
from IPython.display import display, HTML
import datetime
# from time import time
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from collections import OrderedDict
import operator
def show_graph(graph_def, frame_size=(900, 600)):
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:{height}px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(height=frame_size[1], data=repr(str(graph_def)), id='graph' + timestamp)
iframe = """
<iframe seamless style="width:{width}px;height:{height}px;border:0" srcdoc="{src}"></iframe>
""".format(width=frame_size[0], height=frame_size[1] + 20, src=code.replace('"', '"'))
display(HTML(iframe))
def getRunTime(function): # a = lambda _ = None : 3 or #a = lambda : 3
run_start_time = time.time()
result = function()
run_time = time.time() - run_start_time
return result, run_time
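# A minimal sketch: getRunTime times any zero-argument callable, e.g.
#   total, secs = getRunTime(lambda: sum(range(10 ** 6)))
# returns the callable's result together with the elapsed wall-clock seconds.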
def getWriter(key, graph, folder):
# tensorboard --logdir=<folder>
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
return tf.summary.FileWriter(
logdir=os.path.join(folder, timestamp, key),
graph=graph
)
def plotStats(stats, keys, stats_interval=1):
# Plot the change in the validation and training set error over training.
# stats[0:, keys[k]] #0 epoch number
# stats[1:, keys[k]] #1 for training and validation
# keys is from string to index
# stats shape is [epochs, 4]
fig_1 = plt.figure(figsize=(12, 6))
ax_1 = fig_1.add_subplot(111)
# ax_1.hold(True)
for k in ['error(train)', 'error(valid)']:
ax_1.plot(np.arange(1, stats.shape[0]) * stats_interval,
stats[1:, keys[k]], label=k)
# ax_1.hold(False)
ax_1.legend(loc=0)
ax_1.set_xlabel('Epoch number')
# Plot the change in the validation and training set accuracy over training.
fig_2 = plt.figure(figsize=(12, 6))
ax_2 = fig_2.add_subplot(111)
# ax_2.hold(True)
for k in ['acc(train)', 'acc(valid)']:
ax_2.plot(np.arange(1, stats.shape[0]) * stats_interval,
stats[1:, keys[k]], label=k)
# ax_2.hold(False)
ax_2.legend(loc=0)
ax_2.set_xlabel('Epoch number')
# plt.show() better do it outside when you want it
return fig_1, ax_1, fig_2, ax_2
def initStats(epochs):
stats = np.zeros((epochs, 4))
keys = {
'error(train)': 0,
'acc(train)': 1,
'error(valid)': 2,
'acc(valid)': 3
}
return stats, keys
def gatherStats(e, train_error, train_accuracy, valid_error, valid_accuracy, stats):
stats[e, 0] = train_error
stats[e, 1] = train_accuracy
stats[e, 2] = valid_error
stats[e, 3] = valid_accuracy
return stats
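# A minimal sketch of how initStats and gatherStats pair up (numbers are
# placeholders):
#   stats, keys = initStats(epochs=10)
#   stats = gatherStats(0, 1.2, 0.55, 1.3, 0.50, stats)
# so row e of stats holds [error(train), acc(train), error(valid), acc(valid)]
# per the keys mapping above.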
class DynStats(object):
"""
dynStats = DynStats()
dynStats.gatherStats(train_error, train_accuracy, valid_error, valid_accuracy)
return dynStats.stats, dynStats.keys
"""
def __init__(self, accuracy=False, validation=False):
super(DynStats, self).__init__()
self.__stats = []
keys = self.KEYS.copy()
if accuracy is False:
acc_keys = self.__getAccuracyKeys(keys=keys)
for acc_key in acc_keys:
del keys[acc_key]
if validation is False:
valid_keys = self.__getValidationKeys(keys=keys)
for valid_key in valid_keys:
del keys[valid_key]
# renumber them
counter = 0
for cur_key in self.KEYS.keys():
if cur_key in keys.keys():
keys[cur_key] = counter
counter += 1
self.keys = keys
KEYS = OrderedDict([
('error(train)', 0),
('acc(train)', 1),
('error(valid)', 2),
('acc(valid)', 3),
])
def gatherStats(self, train_error, train_accuracy=None, valid_error=None, valid_accuracy=None):
"""# KEEP THE ORDER"""
cur_stats = [train_error]
if train_accuracy is not None:
cur_stats.append(train_accuracy)
if valid_error is not None:
cur_stats.append(valid_error)
if valid_accuracy is not None:
cur_stats.append(valid_accuracy)
self.__stats.append(np.array(cur_stats))
return self.stats
@property
def stats(self):
return np.array(self.__stats)
@staticmethod
def __getAccuracyKeys(keys):
return [key for key in keys.keys() if "acc" in key]
@staticmethod
def __getValidationKeys(keys):
return [key for key in keys.keys() if "valid" in key]
@staticmethod
def __getErrorKeys(keys):
return [key for key in keys.keys() if "error" in key]
def plotStats(self, stats_interval=1):
# Plot the change in the validation and training set error over training.
# stats[0:, keys[k]] #0 epoch number
# stats[1:, keys[k]] #1 for training and validation
# keys is from string to index
# stats shape is [epochs, 4]
fig_1 = plt.figure(figsize=(12, 6))
ax_1 = fig_1.add_subplot(111)
# ax_1.hold(True)
for kk in self.__getErrorKeys(keys=self.keys):
ax_1.plot(np.arange(1, self.stats.shape[0]) * stats_interval, self.stats[1:, self.keys[kk]], label=kk)
# ax_1.hold(False)
ax_1.legend(loc=0)
ax_1.set_xlabel('Epoch number')
figs = [fig_1]
axes = [ax_1]
# Plot the change in the validation and training set accuracy over training.
accuracyKeys = self.__getAccuracyKeys(keys=self.keys)
if len(accuracyKeys) > 0:
fig_2 = plt.figure(figsize=(12, 6))
ax_2 = fig_2.add_subplot(111)
# ax_2.hold(True)
for kk in accuracyKeys:
ax_2.plot(np.arange(1, self.stats.shape[0]) * stats_interval, self.stats[1:, self.keys[kk]], label=kk)
# ax_2.hold(False)
ax_2.legend(loc=0)
ax_2.set_xlabel('Epoch number')
figs.append(fig_2)
axes.append(ax_2)
# plt.show() better do it outside when you want it
return figs, axes
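# A minimal sketch of the intended DynStats flow (values are placeholders):
#   dyn_stats = DynStats(accuracy=True, validation=True)
#   dyn_stats.gatherStats(1.2, 0.55, 1.3, 0.50)  # keep the argument order
#   dyn_stats.stats.shape  # (1, 4): one row per gatherStats call
#   figs, axes = dyn_stats.plotStats()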
def renderStatsList(stats_list, epochs, title='Training Error', kk='error(train)'):
fig = plt.figure(figsize=(12, 6))
assert len(stats_list) > 0, "the stats list should not be empty, a nothing experiment does not make sense"
keys = stats_list[0].keys
valid_err_ind = keys['error(valid)']
min_valid_errs = [np.min(stat.stats[:, valid_err_ind]) for stat in stats_list]
best_stats_locations = np.argsort(min_valid_errs)[
:7] # only seven because these are the colors supported by default by matplotlib
for ii, cur_stat in enumerate(stats_list):
stats = cur_stat.stats
xValues = np.arange(1, stats.shape[0])
yValues = stats[1:, keys[kk]]
if ii in best_stats_locations:
plt.plot(xValues, yValues)
else:
plt.plot(xValues, yValues, c='lightgrey')
plt.legend(loc=0)
plt.title(title + ' over {} epochs'.format(epochs))
plt.xlabel('Epoch number')
plt.ylabel(title)
plt.grid()
return fig # fig.savefig('cw%d_part%d_%02d_fig.svg' % (coursework, part, figcount))
def renderStatsCollection(statsCollection, label_texts, title='Training Error', kk='error(train)', keys=DynStats.KEYS):
"""
usage:
statsCollection[(state_size, num_steps)] = stats
(note that the stats above are NOT dynamic stats)
label_texts depends on how many keys you have in your collection
"""
fig = plt.figure(figsize=(12, 6))
epochs = len(statsCollection.values()[0])
maxValidAccs = OrderedDict([(key, max(val[:, -1])) for key, val in statsCollection.iteritems()])
highValidAccs = sorted(maxValidAccs.items(), key=operator.itemgetter(1))[::-1]
highValidAccs = OrderedDict(
highValidAccs[:7]) # only seven because these are the colors supported by default by matplotlib
for key in statsCollection:
label = ", ".join(
[(label_texts[ii] + ": " + str(val)) for ii, val in enumerate(key)]
)
stats = statsCollection[key]
xValues = np.arange(1, stats.shape[0])
yValues = stats[1:, keys[kk]]
if key in highValidAccs.keys():
plt.plot(xValues, yValues, label=label)
else:
plt.plot(xValues, yValues, c='lightgrey')
# plt.hold(True)
# plt.hold(False)
plt.legend(loc=0)
plt.title(title + ' over {} epochs'.format(epochs))
plt.xlabel('Epoch number')
plt.ylabel(title)
plt.grid()
plt.show()
return fig # fig.savefig('cw%d_part%d_%02d_fig.svg' % (coursework, part, figcount))
def renderStatsListWithLabels(stats_list, label_text, title='Training Error', kk='error(train)'):
keys = [(s,) for s in range(len(stats_list))]
stats_dict = OrderedDict(zip(keys, [cur_stat.stats for cur_stat in stats_list]))
return renderStatsCollection(kk=kk, title=title, label_texts=[label_text], statsCollection=stats_dict,
keys=stats_list[0].keys)
def renderStatsCollectionOfCrossValids(stats_dic, label_texts, title='Training Error', kk='error(train)',
drop_firsts=1, with_individual_folds=True):
"""
usage:
stats_dic[(learning_rate,)] = stats_list
(note that the stats above are indeed dynamic stats)
label_texts depends on how many keys you have in your collection
"""
fig = plt.figure(figsize=(12, 6))
num_k_folds = len(stats_dic.values()[0])
epochs = len(stats_dic.values()[0][0].stats)
keys = stats_dic.values()[0][0].keys
# maxValidAccs = OrderedDict([(key, max(val[:, -1])) for key, val in statsCollection.iteritems()])
# highValidAccs = sorted(maxValidAccs.items(), key=operator.itemgetter(1))[::-1]
# highValidAccs = OrderedDict(
# highValidAccs[:7]) # only seven because these are the colors support by default by matplotlib
kinds = stats_dic.keys() # ('cubic', 'quadratic', 'slinear', 'nearest', 'linear', 'zero', 4, 5)
cmap = plt.get_cmap('jet')
colors = cmap(np.linspace(0, 1.0, len(kinds)))
# https://stackoverflow.com/questions/7513262/matplotlib-large-set-of-colors-for-plots
for color, hyper_params_key in zip(colors, stats_dic.keys()):
label = ", ".join(
[(label_texts[ii] + ": " + str(val)) for ii, val in enumerate(hyper_params_key)]
)
x_values = np.arange(drop_firsts, epochs)
dyn_stats_list = stats_dic[hyper_params_key]
if with_individual_folds:
for jj, dyn_stats in enumerate(dyn_stats_list):
stats = dyn_stats.stats
y_values = stats[drop_firsts:, keys[kk]]
# if jj == 0:
# plt.plot(x_values, y_values, label=label, c=color)
# else:
plt.plot(x_values, y_values, c='lightgrey')
y_values_list = np.array([dyn_stats.stats[drop_firsts:, keys[kk]] for dyn_stats in dyn_stats_list])
y_values_mean = np.mean(y_values_list, axis=0)
plt.plot(x_values, y_values_mean, label=label, c=color)
# plt.hold(False) #deprecated
plt.legend(loc=0)
plt.title(title + ' over {} epochs'.format(epochs))
plt.xlabel('Epoch number')
plt.ylabel(title)
plt.grid()
return fig # fig.savefig('cw%d_part%d_%02d_fig.svg' % (coursework, part, figcount))
def my_plot_convergence(*args, **kwargs):
"""Plot one or several convergence traces.
Parameters
----------
* `args[i]` [`OptimizeResult`, list of `OptimizeResult`, or tuple]:
The result(s) for which to plot the convergence trace.
- if `OptimizeResult`, then draw the corresponding single trace;
- if list of `OptimizeResult`, then draw the corresponding convergence
traces in transparency, along with the average convergence trace;
- if tuple, then `args[i][0]` should be a string label and `args[i][1]`
an `OptimizeResult` or a list of `OptimizeResult`.
* `ax` [`Axes`, optional]:
The matplotlib axes on which to draw the plot, or `None` to create
a new one.
* `true_minimum` [float, optional]:
The true minimum value of the function, if known.
* `yscale` [None or string, optional]:
The scale for the y-axis.
Returns
-------
* `ax`: [`Axes`]:
The matplotlib axes.
"""
# <3 legacy python
ax = kwargs.get("ax", None)
true_minimum = kwargs.get("true_minimum", None)
yscale = kwargs.get("yscale", None)
if ax is None:
ax = plt.gca()
ax.set_title("Convergence plot")
ax.set_xlabel("Number of calls $n$")
ax.set_ylabel(r"$\min f(x)$ after $n$ calls")
ax.grid()
if yscale is not None:
ax.set_yscale(yscale)
colors = plt.cm.viridis(np.linspace(0.25, 1.0, len(args)))
for results, color in zip(args, colors):
if isinstance(results, tuple):
name, results = results
else:
name = None
if isinstance(results, list):
n_calls = len(results[0].x_iters)
iterations = range(1, n_calls + 1)
mins = [[np.min(r.func_vals[:i]) for i in iterations]
for r in results]
for m in mins:
ax.plot(iterations, m, c=color, alpha=0.2)
ax.plot(iterations, np.mean(mins, axis=0), c=color,
marker=".", markersize=12, lw=2, label=name)
else:
n_calls = len(results.x_iters)
mins = [np.min(results.func_vals[:i])
for i in range(1, n_calls + 1)]
ax.plot(range(1, n_calls + 1), mins, c=color,
marker=".", markersize=12, lw=2, label=name)
if true_minimum:
ax.axhline(true_minimum, linestyle="--",
color="r", lw=1,
label="True minimum")
if true_minimum or name:
ax.legend(loc="best")
return ax
def plot_res_gp(res_gp):
"""to plot results from the gaussian process optimization of scikit-optimize (skopt) package"""
fig = plt.figure(figsize=(12, 6))
my_plot_convergence(res_gp)
plt.grid()
plt.show()
fig = plt.figure(figsize=(12, 6))
plt.plot(res_gp.func_vals, 'bo-') # 'b = blue, o = draw circles, - = draw lines between dots
# plt.hold(True)
# plt.scatter(range(len(res_gp.func_vals)), res_gp.func_vals)
plt.ylabel(r'$f(x)$')
plt.xlabel('Number of calls $n$')
plt.xlim([0, len(res_gp.func_vals)])
plt.show()
class MyOptimizeResult(object):
def __init__(self, res_gp):
super(MyOptimizeResult, self).__init__()
self.x_iters = res_gp.x_iters
self.func_vals = res_gp.func_vals
self.best_params = res_gp.x
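# A minimal sketch: MyOptimizeResult keeps exactly the attributes that
# my_plot_convergence reads (x_iters and func_vals), so a stored result can be
# re-plotted without the full skopt object, e.g.
#   my_plot_convergence(('gp', MyOptimizeResult(res_gp)))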
|
pligor/predicting-future-product-prices
|
04_time_series_prediction/mylibs/jupyter_notebook_helper.py
|
Python
|
agpl-3.0
| 15,367
|
[
"Gaussian"
] |
d4258146a4df5a2b5fc502bab1a9a11b5b2700af2fe8607c57c14e71af73c9c3
|
from parsimonious import Grammar
from grammar_dev.grammars.CustomNodeVisitorFactory import CustomVisitorFactory
def test_custom_visitor_factory():
text = """Hi there, partner"""
grammar = r"""
text = greeting punctuation identifier
greeting = hi_there?
punctuation = comma?
identifier = partner?
hi_there = "Hi there"
comma = ", "
partner = "partner"
"""
grammar = Grammar(grammar)
terminals = ["hi_there", "comma", "partner"]
nonterminals = ["text", "greeting", "punctuation", "identifier"]
custom_visitor = CustomVisitorFactory(terminals, nonterminals, dict()).create_instance()
#custom_visitor = custom_visitor.create_instance()
root = grammar.parse(text)
# print("The parse tree:")
# print(root.prettily())
xml = custom_visitor.visit(root)
assert xml=="<text> <greeting> Hi there </greeting><punctuation> , </punctuation><identifier> partner </identifier> </text>"
# print(xml)
# print("Finished.")
|
NateV/GrammarDev
|
tests/test_CustomNodeVisitorFactory.py
|
Python
|
gpl-2.0
| 950
|
[
"VisIt"
] |
4cec37dcfbabe2713a7ce5aebe3b9b07ff58f148800f83d6667edd87b54042cc
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# rekeywords - keywords in the runtime environment configuration files
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Keywords in the runtimeenvironment configuration files"""
def get_keywords_dict():
"""Return a dictionary maping keywords to a dictionary with settings"""
rename = {
'Description': 'The name of the runtime environment.',
'Example': 'POVRAY-3.6',
'Type': 'string',
'Value': '',
'Required': True,
}
description = {
'Description': 'The general description of this runtime environment.',
'Example': 'Support execution of POVRAY-3.6 jobs.',
'Type': 'string',
'Value': 'Not available',
'Required': False,
}
software = {
'Description': """The software required to satisfy this runtime
environment. Keywords: 'name', 'version', 'url', 'description', 'icon'""",
'Example': """['povray','3.6','http://www.povray.org/download/',
'This will make the most amazing povray ever seen.','povray.jpg']""",
'Type': 'RE_software',
'Value': [],
'Required': False,
'Sublevel': True,
'Sublevel_required': ['name', 'version', 'url', 'description',
'icon'],
'Sublevel_optional': [],
}
testprocedure = {
'Description': '''The procedure for testing the runtime environment,
this must be on the mRSL format.''',
'Example': '''::EXECUTE::
command
::VERIFYFILES::
TODO: complete this example!
''',
'Type': 'testprocedure',
'Value': [],
'Required': False,
}
verifystdout = {
'Description': '''The expected content of the .stdout file if a
testprocedure job is executed. (empty lines not supported)''',
'Example': '::dido.imada.sdu.dk\n',
'Type': 'multiplestrings',
'Value': [],
'Required': False,
}
verifystderr = {
'Description': '''The expected content of the .stderr file if a
testprocedure job is executed. (empty lines not supported)''',
'Example': '::bash: notvalidcomnmand: command not found\n',
'Type': 'multiplestrings',
'Value': [],
'Required': False,
}
verifystatus = {
'Description': '''The expected content of the .status file if a
testprocedure job is executed. (empty lines not supported)''',
'Example': '.* 0\n',
'Type': 'multiplestrings',
'Value': [],
'Required': False,
}
environmentvariable = {
'Description': '''The environment variables which must be set on the
resource for the runtime environment to work. name, example, description''',
'Example': """['name=POVRAY_HOME','example=/usr/local/povray/',
'description=Path to Povray home.']""",
'Type': 'RE_environmentvariable',
'Value': [],
'Required': False,
'Sublevel': True,
'Sublevel_required': ['name', 'example', 'description'],
'Sublevel_optional': [],
}
# create the keywords in a single dictionary
keywords_dict = {
'RENAME': rename,
'DESCRIPTION': description,
'SOFTWARE': software,
'TESTPROCEDURE': testprocedure,
'VERIFYSTDOUT': verifystdout,
'VERIFYSTDERR': verifystderr,
'VERIFYSTATUS': verifystatus,
'ENVIRONMENTVARIABLE': environmentvariable,
}
return keywords_dict
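# A small sketch of consuming the dictionary above, e.g. collecting the
# keywords marked as required:
#   required = [k for k, v in get_keywords_dict().items() if v['Required']]
#   # -> ['RENAME']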
|
heromod/migrid
|
mig/shared/rekeywords.py
|
Python
|
gpl-2.0
| 4,262
|
[
"Brian"
] |
13f8efe80cc84196c7438ba724aab234492657915807e24460005d25c1ddfa3f
|
import os
import re
import json
import time
import math
import random
import logging
import argparse
import operator
import functools
from collections import Counter, OrderedDict
import asyncio
import numpy as np
import pandas as pd
from aiohttp import ClientSession
from angular_flask import app
from angular_flask import helper
from angular_flask.core import mongo, cache
from angular_flask.utils import get_logger, multikeysort, cmp
static_url = 'https://fantasy.premierleague.com/api/bootstrap-static/'
order_dict_by_key = lambda d: OrderedDict(sorted(d.items(), key = lambda x: x[0], reverse=True))
logger = get_logger(__name__)
seed = 1
multiplier = 0.775
season_weights = []
for i in range(19):
season_weights += [seed]
seed *= multiplier
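# The loop above builds 19 geometrically decaying season weights, i.e.
# season_weights[i] == multiplier ** i; an equivalent one-liner would be:
#   season_weights = [multiplier ** i for i in range(19)]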
async def fetch(session, url):
resp = await session.request(method="GET", url=url, skip_auto_headers={'USER-AGENT'})
content = await resp.json()
return content
async def fetch_all(session, urls):
tasks = []
for url in urls:
task = asyncio.create_task(fetch(session, url))
tasks.append(task)
results = await asyncio.gather(*tasks, return_exceptions=True)
return results
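# A minimal usage sketch of the fan-out above ('other_url' is a placeholder):
#   async with ClientSession() as session:
#       results = await fetch_all(session, [static_url, other_url])
# Failed requests come back as exception objects inside 'results' because
# return_exceptions=True is passed to asyncio.gather.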
def update_test():
with app.app_context():
test = mongo.db.test
val = test.find_one()['val']
test.find_one_and_update({'var': 'a'}, {'$set': {'val': val + 1}})
async def update_current_gw():
with app.app_context():
currentgw = mongo.db.currentgw
gw_obj = currentgw.find_one()
eid = gw_obj['_id']
gw_finished = not gw_obj['finished']
async with ClientSession() as session:
static_data = await fetch(session, static_url)
gw = next((x['id'] for x in static_data['events'] if x['is_current'] == True), None)
currentgw.find_one_and_update({'_id': eid}, {'$set': {'gw': gw, 'finished': gw_finished}})
async def update_epl_teams():
with app.app_context():
eplteams = mongo.db.eplteams
eplteams.delete_many({})
async with ClientSession() as session:
static_data = await fetch(session, static_url)
eplteams.insert_many([{'name': str(team['name']), 'short': str(team['short_name']), 'id': team['id']} for team in static_data['teams']])
async def update_epl_players():
with app.app_context():
eplplayers = mongo.db.eplplayers
async with ClientSession() as session:
static_data = await fetch(session, static_url)
for player in static_data['elements']:
existing_player_obj = eplplayers.find_one({'id': player['id']})
if existing_player_obj == None:
eplplayers.insert_one({'id': player['id'], 'name': player['web_name'], 'team': player['team'], 'pos': player['element_type'], 'tsb': float(player['selected_by_percent']), 'prev_cost': player['now_cost'], 'cost': player['now_cost']})
else:
eplplayers.find_one_and_update({'id': player['id']}, {'$set': {'team': player['team'], 'tsb': float(player['selected_by_percent']), 'prev_cost': existing_player_obj['cost'], 'cost': player['now_cost']}})
async def update_season_history():
with app.app_context():
eplplayers_season_history = mongo.db.eplplayers_season_history
eplteams = mongo.db.eplteams
async with ClientSession() as session:
static_data = await fetch(session, static_url)
for player in static_data['elements']:
player['team_name'] = eplteams.find_one({'id': player['team']})['short']
player['position'] = get_player_position(player['element_type'])
eplplayers_season_history.update_one({'id': "2020-21", 'player': player['id']}, {'$set': {"history": player}}, upsert=True)
async def update_gw_fixtures():
with app.app_context():
gwfixtures = mongo.db.gwfixtures
gwfixtures.delete_many({})
gw = mongo.db.currentgw.find_one()['gw']
fixtures_url = 'https://fantasy.premierleague.com/api/fixtures/?event=%d' % gw
async with ClientSession() as session:
fixtures_data = await fetch(session, fixtures_url)
gwfixtures.insert_many({'id': str(fix['id']), 'started': fix['started'], 'finished_90': fix['finished_provisional'], 'finished': fix['finished'], 'home': fix['team_h'], 'away': fix['team_a'], 'bps': fix['stats'][9]['a'] + fix['stats'][9]['h'] if fix['started'] == True else []} for fix in fixtures_data)
def get_fixture_status():
with app.app_context():
gwfixtures = mongo.db.gwfixtures
fixture_status_dict = {}
fixture_status_90_dict = {}
for fixture in gwfixtures.find():
fixture_status_dict[fixture['id']] = fixture['finished']
fixture_status_90_dict[fixture['id']] = fixture['finished_90']
return fixture_status_dict, fixture_status_90_dict
def process_live_points(live_data):
with app.app_context():
print('Updating Live Points!')
livepoints = mongo.db.livepoints
fixture_status_dict, fixture_status_90_dict = get_fixture_status()
for player in live_data['elements']:
player_id = player['id']
points = player['stats']['total_points']
fixture = []
minutes = []
for f in player['explain']:
fixture.append(f['fixture'])
for s in f['stats']:
if s['identifier'] == 'minutes':
minutes.append(s['value'])
break
live_player_obj = {'live_bonus': {}, 'fixture': fixture, 'minutes': minutes, 'points': points, 'fixture_finished': {str(f): fixture_status_dict[str(f)] for f in fixture}, 'fixture_finished_90': {str(f): fixture_status_90_dict[str(f)] for f in fixture}}
livepoints.update_one({'id': player_id}, {'$set': live_player_obj}, upsert=True)
update_live_bonus()
cache.flush_all()
async def update_live_points():
with app.app_context():
await update_gw_fixtures()
gw = mongo.db.currentgw.find_one()['gw']
gwpoints = mongo.db.gwpoints
live_url = 'https://fantasy.premierleague.com/api/event/%d/live' % gw
async with ClientSession() as session:
live_data = await fetch(session, live_url)
gwpoints.update_one({'gw': gw}, {'$set': {'players': live_data['elements']}}, upsert=True)
process_live_points(live_data)
def update_live_bonus():
with app.app_context():
livepoints = mongo.db.livepoints
gwfixtures = mongo.db.gwfixtures
for fixture in gwfixtures.find():
if len(fixture['bps']) > 0:
bps_dict = {}
bonus_candidates_dict = {3: [], 2: [], 1: []}
total_bonus_candidates = 0
bps_values = sorted(fixture['bps'], key=lambda x: x['value'], reverse=True)
#Group players by 'bonus points score' values
for tup in bps_values:
if tup['value'] in bps_dict:
bps_dict[tup['value']] += [tup['element']]
else:
bps_dict[tup['value']] = [tup['element']]
bps_dict = order_dict_by_key(bps_dict)
for bps, players in bps_dict.items():
#Break if there are at least 3 candidates already
if total_bonus_candidates >= 3:
break
if len(bonus_candidates_dict[3]) == 0:
bonus_candidates_dict[3] = players
elif len(bonus_candidates_dict[2]) == 0:
if total_bonus_candidates == 1:
bonus_candidates_dict[2] = players
else:
bonus_candidates_dict[1] = players
else:
bonus_candidates_dict[1] = players
total_bonus_candidates += len(players)
for bonus_value, players in bonus_candidates_dict.items():
for player in players:
livepoints.update_one({'id': player}, {'$set': {'live_bonus': { fixture["id"]: bonus_value }}})
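# A worked sketch of the allocation above: given fixture bps such as
#   [{'element': 1, 'value': 30}, {'element': 2, 'value': 30}, {'element': 3, 'value': 24}]
# both tied leaders land in the 3-point band, the 2-point band is skipped
# (total_bonus_candidates is already 2), and element 3 receives 1 bonus point.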
async def store_gw_points():
with app.app_context():
curr_gw = mongo.db.currentgw.find_one()['gw']
gwpoints = mongo.db.gwpoints
async with ClientSession() as session:
for gw in range(1, curr_gw + 1):
gw_url = 'https://fantasy.premierleague.com/api/event/%d/live/' % gw
gw_data = await fetch(session, gw_url)
gwpoints.update_one({'gw': gw}, {'$set': {'players': gw_data['elements']}}, upsert=True)
print('Stored points for GW: ' + str(gw))
def save_picks(manager_id, manager_name, gw_data, gw_finished):
with app.app_context():
eplplayers = mongo.db.eplplayers
playing, bench = [], []
captain = None
vicecaptain = None
position_map = dict.fromkeys([str(x) for x in range(1, 16)], None)
transcost = gw_data['entry_history']['event_transfers_cost']
overall_rank = gw_data['entry_history']['overall_rank']
total_points = gw_data['entry_history']['total_points'] - gw_data['entry_history']['points']
if gw_finished:
total_points += transcost
team_value = (gw_data['entry_history']['value']) / float(10)
chip = gw_data['active_chip']
points = gw_data['entry_history']['points']
bench_points = gw_data['entry_history']['points_on_bench']
for pick in gw_data['picks']:
if pick['is_captain']:
captain = pick['element']
if pick['is_vice_captain']:
vicecaptain = pick['element']
if pick['position'] >= 12:
bench.append(pick['element'])
else:
playing.append(pick['element'])
player_position = eplplayers.find_one({'id': pick['element']})['pos']
position_map[str(pick['position'])] = player_position
return {'code': manager_id, 'name': manager_name, 'overall_rank': overall_rank, 'total_points': total_points, 'team_value': team_value, 'position_map': position_map, 'points': points, 'bench_points': bench_points, 'captain': captain, 'vicecaptain': vicecaptain, 'chip': chip, 'cost': transcost, 'playing': playing, 'bench': bench}
def process_picks(manager_list, responses, gw_finished):
tourny_gwpicks = mongo.db.tourny_gwpicks
tourny_gwpicks.delete_many({})
for manager_obj, manager_picks in zip(manager_list, responses):
manager_code, manager_name = manager_obj
manager_obj = save_picks(manager_code, manager_name, manager_picks, gw_finished)
tourny_gwpicks.update_one({"code": manager_code}, {'$set': manager_obj}, upsert=True)
async def update_tourny_picks_custom():
with app.app_context():
tourny_teams = mongo.db.tourny_teams
tourny_gwpicks = mongo.db.tourny_gwpicks
curr_gw = mongo.db.currentgw.find_one()
gw = curr_gw['gw']
gw_finished = curr_gw['finished']
picks_url = 'https://fantasy.premierleague.com/api/entry/{}/event/{}/picks/'
manager_list, manager_urls = [], []
team = tourny_teams.find_one({"id": "sparta_Patrick-Abdul-Ed"})
manager_list += [(manager['code'], manager['manager']) for manager in team['managers']]
manager_urls += [picks_url.format(manager['code'], gw) for manager in team['managers']]
async with ClientSession() as session:
logger.info("Requesting manager picks for tournys")
responses = await fetch_all(session, manager_urls)
logger.info("Received manager picks for tournys")
for manager_obj, manager_picks in zip(manager_list, responses):
manager_code, manager_name = manager_obj
manager_obj = save_picks(manager_code, manager_name, manager_picks, gw_finished)
tourny_gwpicks.update_one({"code": manager_code}, {'$set': manager_obj}, upsert=True)
async def update_tourny_picks():
with app.app_context():
tourny_teams = mongo.db.tourny_teams
curr_gw = mongo.db.currentgw.find_one()
gw = curr_gw['gw']
gw_finished = curr_gw['finished']
picks_url = 'https://fantasy.premierleague.com/api/entry/{}/event/{}/picks/'
manager_list, manager_urls = [], []
for team in tourny_teams.find():
manager_list += [(manager['code'], manager['manager']) for manager in team['managers']]
manager_urls += [picks_url.format(manager['code'], gw) for manager in team['managers']]
async with ClientSession() as session:
logger.info("Requesting manager picks for tournys")
responses = await fetch_all(session, manager_urls)
logger.info("Received manager picks for tournys")
process_picks(manager_list, responses, gw_finished)
async def update_bot_user_picks():
with app.app_context():
bot_users = mongo.db.botusers
logger.info("Requesting manager picks for bot")
for user in bot_users.find():
user_fpl_id = user['user_fpl_id']
username = user['username']
user_picks = await get_user_picks(user_fpl_id, username)
bot_users.find_one_and_update({'user_fpl_id': user_fpl_id}, {'$set': {'picks': user_picks}})
logger.info("Saved manager picks for bot")
async def get_user_picks(user_fpl_id, username):
with app.app_context():
curr_gw = mongo.db.currentgw.find_one()
gw = curr_gw['gw']
gw_finished = curr_gw['finished']
picks_url = 'https://fantasy.premierleague.com/api/entry/{}/event/{}/picks/'.format(user_fpl_id, gw)
history_url = 'https://fantasy.premierleague.com/api/entry/{}/history/'.format(user_fpl_id)
async with ClientSession() as session:
manager_data = await fetch_all(session, [picks_url, history_url])
pick_data, history_data = manager_data
pick_obj = save_picks(user_fpl_id, username, pick_data, gw_finished)
pick_obj['current_season'] = history_data['current']
pick_obj['season_history'] = [season['rank'] for season in history_data['past']]
pick_obj['chips'] = history_data['chips']
pick_obj['total_points'] = next((i_gw['total_points'] for i_gw in history_data['current'] if i_gw['event'] == gw - 1), 0)
pick_obj['custom_points'] = next((i_gw['total_points'] for i_gw in history_data['current'] if i_gw['event'] == gw - 5), 0)
return pick_obj
'''
def store_manager_history():
with app.app_context():
ffcteams = mongo.db.ffcteams
manager_history = mongo.db.manager_history
start = time.time()
for team in teamList:
obj = ffcteams.find_one({'team': team})
for code in obj['codes']:
history_url = 'https://fantasy.premierleague.com/api/entry/%d/history/' % (code)
try:
history_data = soupify(history_url)
except ValueError:
print(history_url)
manager_history.insert_one({'code': code, 'chips': history_data['chips'], 'entry': history_data['entry'], 'past_seasons': history_data['season'], 'season_history': history_data['history']})
print('Processed ' + team)
print(time.time() - start)
'''
def init_tourny_standings(league):
with app.app_context():
tourny = mongo.db.tourny
tourny_obj = tourny.find_one({'id': league})
leaguestandings = mongo.db.leaguestandings
standings = []
c = 1
for team in tourny_obj['teams']:
item = {
'id': c,
'position': 1,
'last_gw_position': 1,
'team': team,
'played': 0,
'won': 0,
'drawn': 0,
'lost': 0,
'gd': 0,
'total_score': 0,
'bonus': 0,
'points': 0
}
c += 1
standings.append(item)
leaguestandings.insert_one({'league': league, 'standings': standings})
print('Initialized standings')
def update_ffc_standings(league):
with app.app_context():
leaguestandings = mongo.db.leaguestandings
league_obj = leaguestandings.find_one({'league': league})
if league_obj is None: return
standings = league_obj['standings']
gw_results = [
[('Liverpool', 573), ('Watford', 500)],
[('Burnley', 539), ('Newcastle', 512)],
[('Chelsea', 506), ('Bournemouth', 523)],
[('Leicester', 537), ('Norwich', 469)],
[('Sheffield Utd', 555), ('Aston Villa', 542)],
[('Southampton', 493), ('West Ham', 464)],
[('Man Utd', 522), ('Everton', 514)],
[('Wolves', 605), ('Spurs', 526)],
[('Arsenal', 573), ('Man City', 477)],
[('Crystal Palace', 504), ('Brighton', 519)]
]
standings_dict = {}
for fixture in gw_results:
draw = False
winner_points = 3
loser_points = 0
winner_bonus = 0
loser_bonus = 0
teamA = fixture[0][0]
teamB = fixture[1][0]
teamA_score = fixture[0][1]
teamB_score = fixture[1][1]
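# House rule: a margin of 9 FPL points or fewer counts as a draw; otherwise
# every full 10-point margin converts into one goal of goal difference (gd).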
if abs(teamA_score - teamB_score) <= 9:
draw = True
elif teamA_score > teamB_score:
winner = teamA
loser = teamB
winner_score = teamA_score
loser_score = teamB_score
else:
winner = teamB
loser = teamA
winner_score = teamB_score
loser_score = teamA_score
gd = (abs(teamA_score - teamB_score)) // 10
if draw:
standings_dict[teamA] = {'played': 1, 'drawn': 1, 'total_score': teamA_score, 'points': 1}
standings_dict[teamB] = {'played': 1, 'drawn': 1, 'total_score': teamB_score, 'points': 1}
else:
standings_dict[winner] = {'played': 1, 'won': 1, 'gd': gd, 'total_score': winner_score, 'points': winner_points}
standings_dict[loser] = {'played': 1, 'lost': 1, 'gd': gd * -1, 'total_score': loser_score, 'points': loser_points}
for obj in standings:
update_team_obj = standings_dict[obj['team']]
for prop, value in update_team_obj.items():
obj[prop] += value
leaguestandings.update_one({'league': league}, {'$set': {'standings': standings}})
update_ffc_standing_positions(league)
print('Updated standings')
def update_ffc_standing_positions(league):
with app.app_context():
standings = []
leaguestandings = mongo.db.leaguestandings
league_obj = leaguestandings.find_one({'league': league})
if league_obj is None: return
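# multikeysort orders by each key in turn, with a leading '-' treated as
# descending: rank by points, then gd, then total score, then bonus.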
new_standings = multikeysort(league_obj['standings'], ['-points', '-gd', '-total_score', '-bonus'])
for index, item in enumerate(new_standings):
item['last_gw_position'] = item['position']
item['position'] = index + 1
leaguestandings.update_one({'league': league}, {'$set': {'standings': new_standings}})
print('Updated positions')
def analyze_manager_history():
manager_dict = {}
with app.app_context():
manager_history = mongo.db.manager_history
for manager in manager_history.find():
manager_rank = min([gw['rank'] for gw in manager['season_history']])
manager_name = manager['entry']['player_first_name'] + ' ' + manager['entry']['player_last_name']
min_gw = [gw['rank'] for gw in manager['season_history']].index(manager_rank) + 1
min_entry = manager['season_history'][0]['entry']
manager_dict[min_entry] = (manager_name, manager_rank, min_gw)
top_managers = OrderedDict(sorted(manager_dict.items(), key = lambda x: x[1][1], reverse=False))
for k, v in list(top_managers.items())[:20]:
print(str(k) + ': %s' % (v,))
async def update_for_gw(update_gw=True):
start = time.time()
if update_gw:
await update_current_gw()
await update_epl_players()
await update_live_points()
await update_tourny_picks()
# await update_bot_user_picks()
logger.info("[update_for_gw] time: {}".format(time.time() - start))
def update_teams(league, league_file, sheet_name, column_row=0):
with app.app_context():
tourny = mongo.db.tourny
teams = tourny.find_one({'id': league})['teams']
tourny_teams = mongo.db.tourny_teams
df = pd.read_excel(league_file, sheet_name=sheet_name, header=column_row)
for team in teams:
team_id = "_".join([league, team])
team_df = df[df['Team'].str.contains(team)]
manager_links = team_df['Link'].tolist()
manager_codes = [int(re.findall(r'\d+', link)[0]) for link in manager_links]
manager_names = team_df['Player'].tolist()
manager_objs = [{'manager': manager, 'code': code} for manager, code in zip(manager_names, manager_codes)]
print(team_id, manager_objs)
tourny_teams.insert_one({'id': team_id, 'managers': manager_objs})
print('Updated teams!')
def update_div2_fixtures():
with app.app_context():
tourny_fixtures = mongo.db.tourny_fixtures
df = pd.read_excel('etc/Div2_fixtures.xlsx', header=0)
fixture_list = df['FIXTURE'].tolist()
fixture_list = [fixture_list[i:i + 10] for i in range(0, len(fixture_list), 10)]
for i in range(len(fixture_list)):
gw = i + 1
fixtures = [f.replace('FFC', '').split(' vs ') for f in fixture_list[i]]
fixtures = [[team.strip() for team in fx] for fx in fixtures]
gw_fixtures = []
for fx in fixtures:
obj = {}
obj['home'] = fx[0]
obj['away'] = fx[1]
gw_fixtures.append(obj)
print(gw_fixtures)
tourny_fixtures.insert_one({'id': 'div2', 'gw': gw, 'fixtures': gw_fixtures})
print('Added div2 fixtures!')
def update_captains_bench(league):
with app.app_context():
tourny_captains = mongo.db.tourny_captains
tourny_bench = mongo.db.tourny_bench
tourny_teams = mongo.db.tourny_teams
tourny = mongo.db.tourny
teams = tourny.find_one({'id': league})['teams']
for team in teams:
team_id = "_".join([league, team])
obj = tourny_teams.find_one({'id': team_id})
captain_code = obj['managers'][0]['code']
bench_code = obj['managers'][1]['code']
tourny_captains.insert_one({'team': team_id, 'captain': captain_code})
tourny_bench.insert_one({'team': team_id, 'bench': bench_code})
print('Added captains and bench!')
async def get_league_managers(session, league_code, rank, gw):
url = 'https://users.premierleague.com/accounts/login/'
picks_url = 'https://fantasy.premierleague.com/api/entry/{}/event/{}/picks/'
history_url = 'https://fantasy.premierleague.com/api/entry/{}/history/'
league_page_url = 'https://fantasy.premierleague.com/api/leagues-classic/{}/standings/?page_new_entries=1&page_standings={}&phase=1'
payload = {
'password': os.environ.get('FPL_PASSWORD'),
'login': os.environ.get('FPL_USERNAME'),
'redirect_uri': 'https://fantasy.premierleague.com/a/login',
'app': 'plfpl-web'
}
managers = []
break_flag = False
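# Page through the league standings until the requested rank is covered;
# the page count assumes roughly 40 entries per standings page, plus one
# spare page as a buffer.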
num_pages = rank // 40 + 2
league_page_urls = [league_page_url.format(league_code, i) for i in range(1, num_pages)]
await session.post(url, data=payload)
logger.info("Requesting managers for league: {}".format(league_code))
league_page_resp = await fetch_all(session, league_page_urls)
logger.info("Received managers for league: {}".format(league_code))
for league_page in league_page_resp:
if len(league_page['standings']['results']) == 0:
break
for manager in league_page['standings']['results']:
if manager['rank'] > rank:
break_flag = True
break
managers.append({
'manager_name': manager['player_name'],
'manager_code': manager['entry'],
'manager_url': picks_url.format(manager['entry'], gw),
'manager_history_url': history_url.format(manager['entry'])
})
if break_flag:
break
return managers
def get_all_time_stats(season_history):
if len(season_history) == 0: return ('NA', 'NA', 'NA', 'NA', 'NA', 'NA')
season_history.reverse()
top10k_finish = len([rank for rank in season_history if rank <= 10000])
top100k_finish = len([rank for rank in season_history if rank <= 100000])
total_seasons_played = len(season_history)
best_rank = min(season_history)
median = np.median(season_history, axis=0)
if total_seasons_played == 1: return ('NA', median, top10k_finish, top100k_finish, total_seasons_played, best_rank)
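# Outlier filtering via the modified z-score, 0.6745 * |x - median| / MAD:
# the 0.6745 constant rescales the median absolute deviation so it is
# comparable to a standard deviation under a normal distribution.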
med_abs_deviation = np.median([abs(x - median) for x in season_history], axis=0)
mad_score = [0.6745 * abs(rank - median) / med_abs_deviation for rank in season_history]
thresh = math.sqrt(max(mad_score))
season_history = [rank for rank in season_history if 0.6745 * abs(rank - median) / med_abs_deviation <= thresh]
weighted_sum, sum_weights = 0, 0
for i in range(len(season_history)):
weighted_sum += season_history[i] * season_weights[i]
sum_weights += season_weights[i]
hof_or = round(float(weighted_sum) / float(sum_weights))
return (hof_or, round(median), top10k_finish, top100k_finish, total_seasons_played, best_rank)
def get_all_time_stats_obj(season_history):
all_time_stats = get_all_time_stats(season_history)
all_time_stats_obj = {
'hof': all_time_stats[0],
'median': all_time_stats[1],
'top10k': all_time_stats[2],
'top100k': all_time_stats[3],
'total_seasons': all_time_stats[4],
'best_rank': all_time_stats[5]
}
return all_time_stats_obj
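# Minimal usage sketch (hypothetical ranks, assuming the list is ordered
# oldest season first, since get_all_time_stats reverses it before weighting):
#   get_all_time_stats_obj([400000, 95000, 12000, 8000])
# returns the weighted "hall of fame" rank, the median rank, the counts of
# top-10k and top-100k finishes, total seasons played and the best rank.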
async def update_custom_league(league_name, league_code, league_type, rank=1000):
with app.app_context():
curr_gw = mongo.db.currentgw.find_one()
gw = curr_gw['gw']
gw_finished = curr_gw['finished']
leaguecodes = mongo.db.leaguecodes
leaguecodes.delete_one({'league': league_name})
async with ClientSession() as session:
c, manager_pick_objs = 0, []
managers = await get_league_managers(session, league_code, rank, gw)
manager_codes = [manager['manager_code'] for manager in managers]
manager_urls = [manager['manager_url'] for manager in managers]
manager_history_urls = [manager['manager_history_url'] for manager in managers]
manager_picks_task = asyncio.create_task(fetch_all(session, manager_urls))
manager_history_task = asyncio.create_task(fetch_all(session, manager_history_urls))
manager_tasks = [manager_picks_task, manager_history_task]
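# Picks and history are fetched concurrently; return_exceptions=True makes
# gather() hand back a failure as a result instead of cancelling the batch.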
logger.info("Requesting manager picks and history for league: {}".format(league_name))
manager_picks, manager_history = await asyncio.gather(*manager_tasks, return_exceptions=True)
logger.info("Received manager picks and history for league: {}".format(league_name))
for manager, pick_data, history_data in list(zip(managers, manager_picks, manager_history)):
c += 1
pick_obj = save_picks(manager['manager_code'], manager['manager_name'], pick_data, gw_finished)
season_history = [season['rank'] for season in history_data['past']]
pick_obj['season_history'] = season_history
pick_obj['season_history_stats'] = get_all_time_stats_obj(season_history)
pick_obj['chips'] = history_data['chips']
pick_obj['total_points'] = next((i_gw['total_points'] for i_gw in history_data['current'] if i_gw['event'] == gw - 1), 0)
pick_obj['custom_points'] = next((i_gw['total_points'] for i_gw in history_data['current'] if i_gw['event'] == gw - 5), 0)
manager_pick_objs.append(pick_obj)
if c % 1000 == 0: print('Processed picks: ' + str(c))
leaguecodes.insert_one({'league_id': league_code, 'league_type': league_type, 'codes': manager_codes, 'league': league_name, 'picks': manager_pick_objs})
logger.info('Updated League: {}, num of managers: {}'.format(league_name, len(managers)))
async def update_existing_leagues():
with app.app_context():
start = time.time()
leagues = mongo.db.leagues
tasks = []
leagues_objs = []
for league_obj in leagues.find():
rank = 1000
if league_obj['league_name'] == 'top10k':
rank = 10000
leagues_objs.append((league_obj['league_name'], league_obj['league_code'], league_obj['league_type'], rank))
for league_name, league_code, league_type, rank in leagues_objs:
task = asyncio.create_task(update_custom_league(league_name, league_code, league_type, rank=rank))
tasks.append(task)
results = await asyncio.gather(*tasks, return_exceptions=True)
print(time.time() - start)
print('update_existing_leagues complete!')
return results
def update_league_names():
with app.app_context():
leagues = mongo.db.leagues
leaguecodes = mongo.db.leaguecodes
for league_obj in leaguecodes.find():
leagues.insert_one({'league_code': league_obj['league_id'], 'league_name': league_obj['league'], 'league_type': league_obj['league_type']})
def extract_team_names(div, div_file, sheet_name, column_row=0):
with app.app_context():
tourny = mongo.db.tourny
df = pd.read_excel(div_file, sheet_name=sheet_name, header=column_row)
team_names = list(set(df['Team'].tolist()))
team_names = [team.replace(' FFC', '').strip() for team in team_names]
team_names.sort()
for x in team_names:
print(x)
tourny.update_one({'id': div}, {'$set': {'teams': team_names}})
async def get_bench_points():
with app.app_context():
leaguecodes = mongo.db.leaguecodes
league_obj = leaguecodes.find_one({'league': 'maniacs'})
history_urls = []
history_url = 'https://fantasy.premierleague.com/api/entry/{}/history/'
manager_names = []
results = []
for maniac in league_obj['picks']:
name = maniac['name']
code = maniac['code']
history_urls.append(history_url.format(code))
manager_names.append(name)
async with ClientSession() as session:
manager_history_data = await fetch_all(session, history_urls)
for name, manager_data in zip(manager_names, manager_history_data):
manager_bench_points = sum([gw['points_on_bench'] for gw in manager_data['current']])
results.append((name, manager_bench_points))
results = sorted(results, key=lambda x: x[1], reverse=True)
for manager, points in results:
print(manager, points)
def generate_fixtures(teams):
if len(teams) % 2:
raise ValueError("Number of teams must be even.")
n = len(teams)
matches, fixtures, return_matches = ([] for i in range(3))
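# Round-robin scheduling via the circle method: the team in slot 0 stays
# fixed while the rest rotate one slot per round, so every team meets every
# other team exactly once per half-season.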
for _ in range(1, n):
for i in range(n // 2):
matches.append((teams[i], teams[n - 1 - i]))
return_matches.append((teams[n - 1 - i], teams[i]))
teams.insert(1, teams.pop())
fixtures.insert(len(fixtures) // 2, matches)
fixtures.append(return_matches)
matches = []
return_matches = []
return fixtures
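# Example with a hypothetical four-team list: generate_fixtures(['A', 'B',
# 'C', 'D']) yields six rounds of two matches each. The insert-at-midpoint
# keeps the three first-leg rounds in the first half of the schedule, with
# the mirrored return legs appended to the second half.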
def generate_tourny_fixtures(tourny_id):
with app.app_context():
tourny = mongo.db.tourny
tourny_fixtures = mongo.db.tourny_fixtures
teams = tourny.find_one({'id': tourny_id})['teams']
fixtures = generate_fixtures(teams)
w = 1
for gw_fixtures in fixtures:
fixtures_list = []
for home_team, away_team in gw_fixtures:
fixtures_list.append({
'home': home_team,
'away': away_team
})
tourny_fixtures.insert_one({'id': tourny_id, 'gw': w, 'fixtures': fixtures_list})
w += 1
async def store_gwfixtures():
with app.app_context():
tourny_fixtures = mongo.db.tourny_fixtures
eplteams = mongo.db.eplteams
for w in range(1, 39):
fixtures = []
async with ClientSession() as session:
fixtures_url = 'https://fantasy.premierleague.com/api/fixtures/?event=%d' % w
fixtures_data = await fetch(session, fixtures_url)
for fixture in fixtures_data:
homeTeam = eplteams.find_one({'id': fixture['team_h']})['name']
awayTeam = eplteams.find_one({'id': fixture['team_a']})['name']
fixtures.append({
'home': homeTeam,
'away': awayTeam
})
tourny_fixtures.insert_one({'id': 'efpl', 'gw': w, 'fixtures': fixtures})
def get_best_form():
with app.app_context():
gw = mongo.db.currentgw.find_one()['gw']
leaguecodes = mongo.db.leaguecodes
target_league = leaguecodes.find_one({'league': 'top10k'})
results = []
best_points = 0
for manager in target_league['picks']:
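# Rolling form: season total through gw-1, plus live points for the current
# gw, minus the total through gw-5 and the transfer cost, i.e. net points
# over roughly the last five gameweeks.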
points = manager['total_points'] + manager['points'] - manager['custom_points'] - manager['cost']
if points >= best_points:
if points == best_points:
results += [(manager['code'], points)]
else:
best_points = points
results = [(manager['code'], points)]
print(results)
def get_lowest_10k():
with app.app_context():
gw = mongo.db.currentgw.find_one()['gw']
leaguecodes = mongo.db.leaguecodes
target_league = leaguecodes.find_one({'league': 'top10k'})
results = []
min_points = float('inf')
for manager in target_league['picks']:
points = manager['total_points']
if points <= min_points:
if points == min_points:
results += [(manager['code'], points)]
else:
min_points = points
results = [(manager['code'], points)]
print(results)
def get_10k_history():
with app.app_context():
gw = mongo.db.currentgw.find_one()['gw']
leaguecodes = mongo.db.leaguecodes
target_league = leaguecodes.find_one({'league': 'top10k'})
top1k_count, top10k_count, top100k_count = 0, 0, 0
top1k_count_flat, top10k_count_flat, top100k_count_flat = 0, 0, 0
flat_history = []
for manager in target_league['picks']:
if any([True for rank in manager['season_history'] if rank <= 1000]):
top1k_count += 1
if any([True for rank in manager['season_history'] if rank <= 10000]):
top10k_count += 1
if any([True for rank in manager['season_history'] if rank <= 100000]):
top100k_count += 1
top1k_count_flat += sum(rank <= 1000 for rank in manager['season_history'])
top10k_count_flat += sum(rank <= 10000 for rank in manager['season_history'])
top100k_count_flat += sum(rank <= 100000 for rank in manager['season_history'])
flat_history += manager['season_history']
print(top1k_count, top10k_count, top100k_count)
print(len(flat_history))
print(top1k_count_flat, top10k_count_flat, top100k_count_flat)
print(top1k_count_flat / len(flat_history) * 100, top10k_count_flat / len(flat_history) * 100, top100k_count_flat / len(flat_history) * 100)
def update_history_csv(filepath, season):
with app.app_context():
eplplayers_history = mongo.db.eplplayers_history
data = pd.read_csv(filepath)
data_json = json.loads(data.to_json(orient='records'))
eplplayers_history.update_one({'id': season}, {'$set': {'history': data_json}}, upsert=True)
def update_history_col():
with app.app_context():
eplplayers_history = mongo.db.eplplayers_history
eplplayers_season_history = mongo.db.eplplayers_season_history
for season_obj in eplplayers_history.find():
season = season_obj['id']
for player_obj in season_obj['history']:
player_history_data = {}
player_history_data['id'] = season
player_history_data['player'] = player_obj['id']
player_history_data['history'] = player_obj
eplplayers_season_history.insert_one(player_history_data)
def update_season_gw_history(target_dir, season):
with app.app_context():
eplplayers_season_gw_history = mongo.db.eplplayers_season_gw_history
for subdir, dirs, files in os.walk(target_dir):
for file in files:
player_gw_data = {}
if file == "gw.csv":
filepath = os.path.join(subdir, file)
data = pd.read_csv(filepath)
data_json = json.loads(data.to_json(orient='records'))
player_id = str(data_json[0]['element'])
player_gw_data['id'] = season
player_gw_data['player'] = player_id
player_gw_data['gw'] = data_json
eplplayers_season_gw_history.insert_one(player_gw_data)
def update_history_player_teams(season):
with app.app_context():
team_map = {1: 'ARS', 2: 'BOU', 3: 'BHA', 4: 'BUR', 5: 'CAR', 6: 'CHE', 7: 'CRY', 8: 'EVE', 9: 'FUL', 10: 'HUD', 11: 'LEI', 12: 'LIV', 13: 'MCI', 14: 'MUN', 15: 'NEW', 16: 'SOU', 17: 'TOT', 18: 'WAT', 19: 'WHU', 20: 'WOL'}
eplplayers_history = mongo.db.eplplayers_history
season_obj = eplplayers_history.find_one({'id': season})
for player_obj in season_obj['history']:
player_obj['team_name'] = team_map[player_obj['team']]
eplplayers_history.update_one({'id': season}, {'$set': {'history': season_obj['history']}}, upsert=True)
def update_history_player_gw_opp_teams(season):
with app.app_context():
team_map = {}
eplplayers_history = mongo.db.eplplayers_history
eplplayers_season_gw_history = mongo.db.eplplayers_season_gw_history
season_history_obj = eplplayers_history.find_one({'id': season})
season_gw_objs = eplplayers_season_gw_history.find({'id': season})
for player_obj in season_history_obj['history']:
if player_obj['team'] not in team_map:
team_map[player_obj['team']] = player_obj['team_name']
if len(team_map) == 20: break
print(team_map)
for player_obj in season_gw_objs:
for gw_obj in player_obj['gw']:
gw_obj['opponent_team_name'] = team_map[gw_obj['opponent_team']]
eplplayers_season_gw_history.update_one({'id': season, 'player': player_obj['player']}, {'$set': {'gw': player_obj['gw']}})
def update_player_cost():
with app.app_context():
eplplayers_history = mongo.db.eplplayers_history
for season_obj in eplplayers_history.find():
for player_obj in season_obj['history']:
player_obj['now_cost'] /= 10
eplplayers_history.update_one({'id': season_obj['id']}, {'$set': {'history': season_obj['history']}}, upsert=True)
def get_player_position(element_type):
pos = "FWD"
if element_type == 1:
pos = "GKP"
elif element_type == 2:
pos = "DEF"
elif element_type == 3:
pos = "MID"
return pos
def update_player_position():
with app.app_context():
eplplayers_history = mongo.db.eplplayers_history
for season_obj in eplplayers_history.find():
for player_obj in season_obj['history']:
player_obj["position"] = get_player_position(player_obj['element_type'])
eplplayers_history.update_one({'id': season_obj['id']}, {'$set': {'history': season_obj['history']}}, upsert=True)
def create_tourny(tourny_id):
with app.app_context():
tourny = mongo.db.tourny
leagueteams = mongo.db.leagueteams
tourny_obj = {
'name': 'sparta',
'captain': {
'enabled': False
},
'bench': {
'enabled': False
},
'scoring': 'sum',
'home_advtg': {
'enabled': True,
'multiplier': 0.1
},
'teams': leagueteams.find_one({'league': tourny_id})['teams']
}
tourny.update_one({'id': tourny_id}, {'$set': tourny_obj}, upsert=True)
def archive_collection(old_collection, new_collection):
with app.app_context():
for obj in mongo.db[old_collection].find():
mongo.db[new_collection].insert_one(obj)
async def set_up_tourney_from_fpl_league(league_code, team_size, tourny_name):
with app.app_context():
tourny_teams = mongo.db.tourny_teams
url = 'https://users.premierleague.com/accounts/login/'
league_page_url = 'https://fantasy.premierleague.com/api/leagues-classic/{}/standings/?page_new_entries=1&page_standings={}&phase=1'
payload = {
'password': os.environ.get('FPL_PASSWORD'),
'login': os.environ.get('FPL_USERNAME'),
'redirect_uri': 'https://fantasy.premierleague.com/a/login',
'app': 'plfpl-web'
}
managers = []
league_page_url = league_page_url.format(league_code, 1)
async with ClientSession() as session:
await session.post(url, data=payload)
league_page_resp = await fetch(session, league_page_url)
for manager in league_page_resp['new_entries']['results']:
if manager['entry'] == 7302: continue
managers.append({
'first_name': manager['player_first_name'] ,
'last_name:': manager['player_last_name'],
'full_name': manager['player_first_name'] + " " + manager['player_last_name'],
'code': manager['entry']
})
random.shuffle(managers)
i = 0
while i < len(managers):
members = managers[i:i+team_size]
team_name = "-".join([obj['first_name'] for obj in members])
team_obj = {
"id": tourny_name + "_" + team_name,
"managers": [{"manager": manager['full_name'], "code": manager['code']} for manager in members]
}
# print(team_obj)
tourny_teams.insert_one(team_obj)
i += team_size
def setup_league(league, league_file, sheet_name):
extract_team_names(league, league_file, sheet_name)
update_teams(league, league_file, sheet_name)
update_captains_bench(league)
init_tourny_standings(league)
# update_div2_fixtures()
def update():
with app.app_context():
# tourny_teams = mongo.db.tourny_teams
# teams = tourny_teams.find({"id" : {"$regex" : "div1*"}})
# ffcbench = mongo.db.ffcbench
# leagueteams = mongo.db.leagueteams
tourny_bench = mongo.db.tourny_bench
tourny_bench.delete_many({})
# teams = leagueteams.find_one({'league': 'div1'})['teams']
# for t in teams:
# obj = ffcbench.find_one({'team': t})
# tourny_bench.insert_one({'team': 'div1_' + t, "bench": obj["bench"]})
def main():
parser = argparse.ArgumentParser(
description='Manage this Flask application.')
parser.add_argument(
'command', help='the name of the command you want to run')
args = parser.parse_args()
if args.command == 'update_gw':
asyncio.run(update_current_gw())
print("GW Updated!")
elif args.command == 'update_epl_teams':
asyncio.run(update_epl_teams())
print("EPL Teams added!")
elif args.command == 'update_epl_players':
asyncio.run(update_epl_players())
print("EPL Players added!")
elif args.command == 'update_gwfixtures':
asyncio.run(update_gw_fixtures())
print("GW Fixtures added!")
elif args.command == 'update_live_points':
asyncio.run(update_live_points())
print("Live points added!")
elif args.command == 'update_tourny_picks':
asyncio.run(update_tourny_picks())
print("Picks added!")
elif args.command == 'update_bot_picks':
asyncio.run(update_bot_user_picks())
print("Bot picks updated!")
elif args.command == 'update_for_gw':
asyncio.run(update_for_gw())
asyncio.run(update_existing_leagues())
print("Update for GW complete!")
elif args.command == 'update_during_gw':
asyncio.run(update_for_gw(update_gw=False))
asyncio.run(update_existing_leagues())
elif args.command == 'update_standings_div1':
update_ffc_standings("div1")
elif args.command == 'update_standings_div2':
update_ffc_standings("div2")
elif args.command == 'update_standings_positions':
update_ffc_standing_positions("div1")
elif args.command == 'store_gw_points':
asyncio.run(store_gw_points())
elif args.command == 'update_custom_league_1k':
asyncio.run(update_custom_league('top1k', 314, 'custom'))
elif args.command == 'update_custom_league_5k':
asyncio.run(update_custom_league('top5k', 314, 'custom', 5000))
elif args.command == 'update_custom_league_10k':
asyncio.run(update_custom_league('top10k', 314, 'custom', 10000))
elif args.command == 'add_regular_league':
asyncio.run(update_custom_league('maniacs', 634, 'regular'))
elif args.command == 'update_existing_leagues':
asyncio.run(update_existing_leagues())
elif args.command == 'form':
get_best_form()
elif args.command == 'boom':
get_lowest_10k()
elif args.command == 'history_10k':
get_10k_history()
elif args.command == 'test':
update_test()
print("Test pass!")
else:
raise Exception('Invalid command')
if __name__ == '__main__':
main()
# update()
# asyncio.run(update_season_history())
|
code247/FPL_FFC
|
manage.py
|
Python
|
mit
| 40,595
|
[
"CRYSTAL"
] |
14b179dd74048bac8220e2360736d4cb72ce3b7f5e84864bbbb058e18c3df552
|
"""
Generalized Recommender models.
This module contains matrix factorization recommender interfaces
used throughout the whole scikit-crab package.
The interfaces are realized as abstract base classes (i.e., some optional
functionality is provided in the interface itself, so that the interfaces
can be subclassed).
"""
# Author: Marcel Caraciolo <marcel@muricoca.com>
#
# License: BSD Style.
import random
from base import SVDRecommender
from ..knn.item_strategies import ItemsNeighborhoodStrategy
import numpy as np
from math import sqrt
import logging
logger = logging.getLogger('crab')
class MatrixFactorBasedRecommender(SVDRecommender):
"""
Matrix Factorization Based Recommender using
stochastic gradient descent optimization.
Parameters
-----------
data_model: The data model instance that will be data source
for the recommender.
items_selection_strategy: The item candidates strategy that you
can choose for selecting the possible items to recommend.
default = ItemsNeighborhoodStrategy
n_features: int
Number of latent factors. default = 10
learning_rate: float
Learning rate used. default = 0.01
regularization: float
Parameter used to prevent overfitting. default = 0.02
init_mean: float
Mean of the normal distribution used to initialize
the factors. default = 0.1
init_stdev: float
Standard deviation of the normal distribution used to
initialize the factors. default = 0.1
n_iterations: int
Number of iterations over the training data. default = 30
capper: bool (default=True)
Cap the preferences with maximum and minimum preferences
in the model.
with_preference: bool (default=False)
Return the recommendations with the estimated preferences if True.
Attributes
-----------
`model`: The data model instance that will be data source
for the recommender.
`items_selection_strategy`: The item candidates strategy that you
can choose for selecting the possible items to recommend.
default = ItemsNeighborhoodStrategy
`n_features`: int
Number of latent factors. default = 10
`learning_rate`: float
Learning rate used. default = 0.01
`regularization`: float
Parameter used to prevent overfitting. default = 0.02
`init_mean`, `init_stdev`: float
Parameters of the normal distribution used to initialize the latent factors.
`n_iterations`: int
Number of iterations over the training data
`capper`: bool (default=True)
Cap the preferences with maximum and minimum preferences
in the model.
`with_preference`: bool (default=False)
Return the recommendations with the estimated preferences if True.
`user_factors`: array of shape [n_users, n_features]
Matrix containing the latent user factors
`item_factors`: array of shape [n_items, n_features]
Matrix containing the latent item factors
Examples
-----------
>>> from scikits.crab.models.classes import MatrixPreferenceDataModel
>>> from scikits.crab.recommenders.svd.classes import MatrixFactorBasedRecommender
>>> from scikits.crab.recommenders.knn.item_strategies import ItemsNeighborhoodStrategy
>>> movies = {'Marcel Caraciolo': {'Lady in the Water': 2.5, \
'Snakes on a Plane': 3.5, \
'Just My Luck': 3.0, 'Superman Returns': 3.5, 'You, Me and Dupree': 2.5, \
'The Night Listener': 3.0}, \
'Paola Pow': {'Lady in the Water': 3.0, 'Snakes on a Plane': 3.5, \
'Just My Luck': 1.5, 'Superman Returns': 5.0, 'The Night Listener': 3.0, \
'You, Me and Dupree': 3.5}, \
'Leopoldo Pires': {'Lady in the Water': 2.5, 'Snakes on a Plane': 3.0, \
'Superman Returns': 3.5, 'The Night Listener': 4.0}, \
'Lorena Abreu': {'Snakes on a Plane': 3.5, 'Just My Luck': 3.0, \
'The Night Listener': 4.5, 'Superman Returns': 4.0, \
'You, Me and Dupree': 2.5}, \
'Steve Gates': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
'Just My Luck': 2.0, 'Superman Returns': 3.0, 'The Night Listener': 3.0, \
'You, Me and Dupree': 2.0}, \
'Sheldom': {'Lady in the Water': 3.0, 'Snakes on a Plane': 4.0, \
'The Night Listener': 3.0, 'Superman Returns': 5.0, \
'You, Me and Dupree': 3.5}, \
'Penny Frewman': {'Snakes on a Plane':4.5,'You, Me and Dupree':1.0, \
'Superman Returns':4.0}, \
'Maria Gabriela': {}}
>>> model = MatrixPreferenceDataModel(movies)
>>> items_strategy = ItemsNeighborhoodStrategy()
>>> recsys = MatrixFactorBasedRecommender( \
model=model, \
items_selection_strategy=items_strategy, \
n_features=2)
>>> #Return the recommendations for the given user.
>>> recsys.recommend('Leopoldo Pires')
array(['Just My Luck', 'You, Me and Dupree'],\
dtype='|S18')
Notes
-----------
This MatrixFactorizationRecommender does not yet provide
support for rescorer functions.
This MatrixFactorizationRecommender does not yet provide
support for DictDataModels.
References
-----------
"""
def __init__(self, model, items_selection_strategy=None,
n_features=10, learning_rate=0.01, regularization=0.02, init_mean=0.1,
init_stdev=0.1, n_iterations=30, capper=True, with_preference=False):
SVDRecommender.__init__(self, model, with_preference)
self.capper = capper
self.n_features = n_features
self.learning_rate = learning_rate
self.regularization = regularization
self.init_mean = init_mean
self.init_stdev = init_stdev
self.n_iterations = n_iterations
self._global_bias = self._get_average_preference()
self.user_factors = None
self.item_factors = None
if items_selection_strategy is None:
self.items_selection_strategy = ItemsNeighborhoodStrategy()
else:
self.items_selection_strategy = items_selection_strategy
self.factorize()
def _init_models(self):
num_users = self.model.users_count()
num_items = self.model.items_count()
self.user_factors = np.empty(shape=(num_users, self.n_features),
dtype=float)
self.item_factors = np.empty(shape=(num_items, self.n_features),
dtype=float)
'''
pref_interval = self.model.max_preference() - self.model.min_preference()
default_value = math.sqrt(global_bias - pref_interval * 0.1) / self.n_features
interval = pref_interval * 0.1 / self.n_features
for i in range(len(self.n_features)):
for user_idx in self.model.num_users():
self.user_factors[user_idx, i] = default_value + (random.random() - 0.5) * interval * 0.2
for i in range(len(self.n_features)):
for item_idx in self.model.num_items():
self.item_factors[item_idx, i] = default_value + (random.random() - 0.5) * interval * 0.2
'''
#Initialize the factor matrices with normally distributed (Gaussian) noise:
#init_mean is the mean and init_stdev scales the noise, per the docstring
self.user_factors = self.init_mean + self.init_stdev * np.random.randn(num_users, self.n_features)
self.item_factors = self.init_mean + self.init_stdev * np.random.randn(num_items, self.n_features)
def _get_average_preference(self):
if hasattr(self.model, 'index'):
mdat = np.ma.masked_array(self.model.index, np.isnan(self.model.index))
else:
raise TypeError('This model is not yet supported for this recommender.')
return np.mean(mdat)
def _predict(self, user_index, item_index, trailing=True):
#Compute the scalar product between two rows of two matrices
result = self._global_bias + np.sum(self.user_factors[user_index] *
self.item_factors[item_index])
if trailing:
max_preference = self.model.max_preference()
min_preference = self.model.min_preference()
if result > max_preference:
result = max_preference
elif result < min_preference:
result = min_preference
return result
def _train(self, rating_indices, update_user, update_item):
'''
Iterate once over rating data and adjust corresponding factors (stochastic gradient descent)
'''
err_total = 0.0
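#One SGD pass over the observed ratings, minimizing the regularized
#squared error (r_ui - p_ui)^2 + reg * (||u||^2 + ||i||^2); the deltas
#below are the corresponding negative gradients.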
for user_idx, item_idx in rating_indices:
p = self._predict(user_idx, item_idx, False)
err = self.model.index[user_idx, item_idx] - p
err_total += err ** 2  #accumulate squared error for the RMSE
#Adjust the factors
u_f = self.user_factors[user_idx]
i_f = self.item_factors[item_idx]
#Compute factor updates
delta_u = err * i_f - self.regularization * u_f
delta_i = err * u_f - self.regularization * i_f
#if necessary apply updates
if update_user:
self.user_factors[user_idx] += self.learning_rate * delta_u
if update_item:
self.item_factors[item_idx] += self.learning_rate * delta_i
return err_total
def _rating_indices(self):
if hasattr(self.model, 'index'):
rating_indices = [(idx, jdx) for idx in range(self.model.users_count())
for jdx in range(self.model.items_count())
if not np.isnan(self.model.index[idx, jdx])]
else:
raise TypeError('This model is not yet supported for this recommender.')
return rating_indices
def learn_factors(self, update_user=True, update_item=True):
rating_indices = self._rating_indices()
random.shuffle(rating_indices)
for index in range(self.n_iterations):
err = self._train(rating_indices, update_user, update_item)
rmse = sqrt(err / len(rating_indices))
logger.debug("Finished the interation %i with RMSE %f" % \
(index, rmse))
def factorize(self):
#init factor matrices
self._init_models()
#Learn the model parameters
self.learn_factors()
def recommend(self, user_id, how_many=None, **params):
'''
Return a list of recommended items, ordered from most strongly
recommend to least.
Parameters
----------
user_id: int or string
User for which recommendations are to be computed.
how_many: int
Desired number of recommendations (default=None ALL)
'''
self._set_params(**params)
candidate_items = self.all_other_items(user_id)
recommendable_items = self._top_matches(user_id, \
candidate_items, how_many)
return recommendable_items
def estimate_preference(self, user_id, item_id, **params):
'''
A preference is estimated by computing the dot-product
of the user and item feature vectors.
Parameters
----------
user_id: int or string
User for which recommendations are to be computed.
item_id: int or string
ID of item for which wants to find the estimated preference.
Returns
-------
Return an estimated preference if the user has not expressed a
preference for the item, or else the user's actual preference for the
item. If a preference cannot be estimated, returns None.
'''
preference = self.model.preference_value(user_id, item_id)
if not np.isnan(preference):
return preference
#How to catch the user_id and item_id from the matrix.
user_features = self.user_factors[np.where(self.model.user_ids() == user_id)]
item_features = self.item_factors[np.where(self.model.item_ids() == item_id)]
estimated = self._global_bias + np.sum(user_features * item_features)
if self.capper:
max_p = self.model.maximum_preference_value()
min_p = self.model.minimum_preference_value()
estimated = max_p if estimated > max_p else min_p \
if estimated < min_p else estimated
return estimated
def all_other_items(self, user_id, **params):
'''
Parameters
----------
user_id: int or string
User for which recommendations are to be computed.
Returns
---------
Return items in the `model` for which the user has not expressed
the preference and could possibly be recommended to the user.
'''
return self.items_selection_strategy.candidate_items(user_id, \
self.model)
def _top_matches(self, source_id, target_ids, how_many=None, **params):
'''
Parameters
----------
target_ids: array of shape [n_target_ids]
source_id: int or string
item id to compare against.
how_many: int
Desired number of most top items to recommend (default=None ALL)
Returns
--------
Return the top N matches
It can be user_ids or item_ids.
'''
#Empty target_ids
if target_ids.size == 0:
return np.array([])
estimate_preferences = np.vectorize(self.estimate_preference)
preferences = estimate_preferences(source_id, target_ids)
mask = ~np.isnan(preferences)
preferences = preferences[mask]
target_ids = target_ids[mask]
sorted_preferences = np.lexsort((preferences,))[::-1]
sorted_preferences = sorted_preferences[0:how_many] \
if how_many and sorted_preferences.size > how_many \
else sorted_preferences
if self.with_preference:
top_n_recs = np.array([(target_ids[ind], \
preferences[ind]) for ind in sorted_preferences])
else:
top_n_recs = np.array([target_ids[ind]
for ind in sorted_preferences])
return top_n_recs
|
tadejs/crab
|
scikits/crab/recommenders/svd/classes.py
|
Python
|
bsd-3-clause
| 14,222
|
[
"Gaussian"
] |
be52eb15643dd9eb1cd7ea7bcb4869074c8a5588009573874d8fb3293a14c616
|
from __future__ import unicode_literals, division, absolute_import, print_function
import pytest
from flexget.manager import Session
from flexget.plugins.api_trakt import ApiTrakt, TraktActor, TraktMovieSearchResult, TraktShowSearchResult, TraktShow
lookup_series = ApiTrakt.lookup_series
@pytest.mark.online
class TestTraktShowLookup(object):
config = """
templates:
global:
trakt_lookup: yes
# Access a tvdb field to cause lazy loading to occur
set:
afield: "{{tvdb_id}}{{trakt_ep_name}}"
tasks:
test:
mock:
- {title: 'House.S01E02.HDTV.XViD-FlexGet'}
- {title: 'Doctor.Who.2005.S02E03.PDTV.XViD-FlexGet'}
series:
- House
- Doctor Who 2005
test_unknown_series:
mock:
- {title: 'Aoeu.Htns.S01E01.htvd'}
series:
- Aoeu Htns
test_date:
mock:
- title: the daily show 2012-6-6
series:
- the daily show (with trevor noah)
test_absolute:
mock:
- title: naruto 128
series:
- naruto
test_search_result:
mock:
- {title: 'Shameless.2011.S01E02.HDTV.XViD-FlexGet'}
- {title: 'Shameless.2011.S03E02.HDTV.XViD-FlexGet'}
series:
- Shameless (2011)
test_search_success:
mock:
- {title: '11-22-63.S01E01.HDTV.XViD-FlexGet'}
series:
- 11-22-63
"""
def test_lookup_name(self, execute_task):
"""trakt: Test Lookup (ONLINE)"""
task = execute_task('test')
entry = task.find_entry(title='House.S01E02.HDTV.XViD-FlexGet')
assert entry['trakt_show_id'] == 1399, \
'trakt_show_id should be 1399, is %s for %s' % (entry['trakt_show_id'], entry['series_name'])
assert entry['trakt_series_status'] == 'ended', 'Series Status should be "ENDED" returned %s' \
% (entry['trakt_series_status'])
def test_lookup(self, execute_task):
"""trakt: Test Lookup (ONLINE)"""
task = execute_task('test')
entry = task.find_entry(title='House.S01E02.HDTV.XViD-FlexGet')
assert entry['trakt_ep_name'] == 'Paternity', \
'%s trakt_ep_name should be Paternity' % entry['title']
assert entry['trakt_series_status'] == 'ended', \
'status for %s is %s, should be "ended"' % (entry['title'], entry['trakt_series_status'])
assert entry['afield'] == '73255Paternity', 'afield was not set correctly'
assert task.find_entry(trakt_ep_name='School Reunion'), \
'Failed trakt lookup Doctor Who 2005 S02E03'
def test_unknown_series(self, execute_task):
# Test an unknown series does not cause any exceptions
task = execute_task('test_unknown_series')
# Make sure it didn't make a false match
entry = task.find_entry('accepted', title='Aoeu.Htns.S01E01.htvd')
assert entry.get('tvdb_id') is None, 'should not have populated tvdb data'
def test_search_results(self, execute_task):
task = execute_task('test_search_result')
entry = task.entries[0]
print(entry['trakt_series_name'].lower())
assert entry['trakt_series_name'].lower() == 'Shameless'.lower(), 'lookup failed'
with Session() as session:
assert task.entries[1]['trakt_series_name'].lower() == 'Shameless'.lower(), 'second lookup failed'
assert len(session.query(TraktShowSearchResult).all()) == 1, 'should have added 1 show to search result'
assert len(session.query(TraktShow).all()) == 1, 'should only have added one show to show table'
assert session.query(TraktShow).first().title == 'Shameless', 'should have added Shameless and ' \
'not Shameless (2011)'
# change the search query
session.query(TraktShowSearchResult).update({'search': "Shameless.S01E03.HDTV-FlexGet"})
session.commit()
lookupargs = {'title': "Shameless.S01E03.HDTV-FlexGet"}
series = ApiTrakt.lookup_series(**lookupargs)
assert series.tvdb_id == entry['tvdb_id'], 'tvdb id should be the same as the first entry'
assert series.id == entry['trakt_show_id'], 'trakt id should be the same as the first entry'
assert series.title.lower() == entry['trakt_series_name'].lower(), 'series name should match first entry'
def test_search_success(self, execute_task):
task = execute_task('test_search_success')
entry = task.find_entry('accepted', title='11-22-63.S01E01.HDTV.XViD-FlexGet')
assert entry.get('trakt_show_id') == 102771, 'Should have returned the correct trakt id'
def test_date(self, execute_task):
task = execute_task('test_date')
entry = task.find_entry(title='the daily show 2012-6-6')
# Make sure show data got populated
assert entry.get('trakt_show_id') == 2211, 'should have populated trakt show data'
# We don't support lookup by date at the moment, make sure there isn't a false positive
if entry.get('trakt_episode_id') == 173423:
assert False, 'We support trakt episode lookup by date now? Great! Change this test.'
else:
assert entry.get('trakt_episode_id') is None, 'false positive for episode match, we don\'t ' \
'support lookup by date'
def test_absolute(self, execute_task):
task = execute_task('test_absolute')
entry = task.find_entry(title='naruto 128')
# Make sure show data got populated
assert entry.get('trakt_show_id') == 46003, 'should have populated trakt show data'
# We don't support lookup by absolute number at the moment, make sure there isn't a false positive
if entry.get('trakt_episode_id') == 916040:
assert False, 'We support trakt episode lookup by absolute number now? Great! Change this test.'
else:
assert entry.get('trakt_episode_id') is None, 'false positive for episode match, we don\'t ' \
'support lookup by absolute number'
def test_lookup_actors(self, execute_task):
task = execute_task('test')
actors = ['Hugh Laurie',
'Jesse Spencer',
'Jennifer Morrison',
'Omar Epps',
'Robert Sean Leonard',
'Peter Jacobson',
'Olivia Wilde',
'Odette Annable',
'Charlyne Yi',
'Anne Dudek',
'Kal Penn',
'Jennifer Crystal Foley',
'Bobbin Bergstrom']
entry = task.find_entry(title='House.S01E02.HDTV.XViD-FlexGet')
trakt_actors = entry['trakt_series_actors'].values()
trakt_actors = [trakt_actor['name'] for trakt_actor in trakt_actors]
assert entry['series_name'] == 'House', 'series lookup failed'
assert set(trakt_actors) == set(actors), 'looking up actors for %s failed' % entry.get('title')
assert entry['trakt_series_actors']['297390']['name'] == 'Hugh Laurie', 'trakt id mapping failed'
assert entry['trakt_series_actors']['297390']['imdb_id'] == 'nm0491402', 'fetching imdb id for actor failed'
assert entry['trakt_series_actors']['297390']['tmdb_id'] == '41419', 'fetching tmdb id for actor failed'
with Session() as session:
actor = session.query(TraktActor).filter(TraktActor.name == 'Hugh Laurie').first()
assert actor is not None, 'adding actor to actors table failed'
assert actor.imdb_id == 'nm0491402', 'saving imdb_id for actors in table failed'
assert actor.trakt_id == '297390', 'saving trakt_id for actors in table failed'
assert actor.tmdb_id == '41419', 'saving tmdb_id for actors table failed'
@pytest.mark.online
class TestTraktList(object):
config = """
tasks:
test_trakt_movies:
trakt_list:
username: flexgettest
list: watchlist
type: movies
"""
def test_trakt_movies(self, execute_task):
task = execute_task('test_trakt_movies')
assert len(task.entries) == 1
entry = task.entries[0]
assert entry['title'] == '12 Angry Men (1957)'
assert entry['movie_name'] == '12 Angry Men'
assert entry['movie_year'] == 1957
assert entry['imdb_id'] == 'tt0050083'
@pytest.mark.online
class TestTraktWatchedAndCollected(object):
config = """
tasks:
test_trakt_watched:
metainfo_series: yes
trakt_lookup:
username: flexgettest
mock:
- title: Hawaii.Five-0.S04E13.HDTV-FlexGet
- title: The.Flash.2014.S01E10.HDTV-FlexGet
if:
- trakt_watched: accept
test_trakt_collected:
metainfo_series: yes
trakt_lookup:
username: flexgettest
mock:
- title: Homeland.2011.S02E01.HDTV-FlexGet
- title: The.Flash.2014.S01E10.HDTV-FlexGet
if:
- trakt_collected: accept
test_trakt_watched_movie:
trakt_lookup:
username: flexgettest
mock:
- title: Inside.Out.2015.1080p.BDRip-FlexGet
- title: The.Matrix.1999.1080p.BDRip-FlexGet
if:
- trakt_watched: accept
test_trakt_collected_movie:
trakt_lookup:
username: flexgettest
mock:
- title: Inside.Out.2015.1080p.BDRip-FlexGet
- title: The.Matrix.1999.1080p.BDRip-FlexGet
if:
- trakt_collected: accept
test_trakt_show_collected_progress:
disable: builtins
trakt_lookup:
username: flexgettest
trakt_list:
username: flexgettest
list: test
type: shows
strip_dates: yes
if:
- trakt_collected: accept
test_trakt_show_watched_progress:
disable: builtins
trakt_lookup:
username: flexgettest
trakt_list:
username: flexgettest
list: test
type: shows
strip_dates: yes
if:
- trakt_watched: accept
"""
def test_trakt_watched_lookup(self, execute_task):
task = execute_task('test_trakt_watched')
assert len(task.accepted) == 1, 'Episode should have been marked as watched and accepted'
entry = task.accepted[0]
assert entry['title'] == 'Hawaii.Five-0.S04E13.HDTV-FlexGet', 'title was not accepted?'
assert entry['series_name'] == 'Hawaii Five-0', 'wrong series was returned by lookup'
assert entry['trakt_watched'] == True, 'episode should be marked as watched'
def test_trakt_collected_lookup(self, execute_task):
task = execute_task('test_trakt_collected')
assert len(task.accepted) == 1, 'Episode should have been marked as collected and accepted'
entry = task.accepted[0]
assert entry['title'] == 'Homeland.2011.S02E01.HDTV-FlexGet', 'title was not accepted?'
assert entry['series_name'] == 'Homeland 2011', 'wrong series was returned by lookup'
assert entry['trakt_collected'] == True, 'episode should be marked as collected'
def test_trakt_watched_movie_lookup(self, execute_task):
task = execute_task('test_trakt_watched_movie')
print(task.all_entries)
assert len(task.accepted) == 1, 'Movie should have been accepted as it is watched on Trakt profile'
entry = task.accepted[0]
assert entry['title'] == 'Inside.Out.2015.1080p.BDRip-FlexGet', 'title was not accepted?'
assert entry['movie_name'] == 'Inside Out', 'wrong movie name'
assert entry['trakt_watched'] == True, 'movie should be marked as watched'
def test_trakt_collected_movie_lookup(self, execute_task):
task = execute_task('test_trakt_collected_movie')
assert len(task.accepted) == 1, 'Movie should have been accepted as it is collected on Trakt profile'
entry = task.accepted[0]
assert entry['title'] == 'Inside.Out.2015.1080p.BDRip-FlexGet', 'title was not accepted?'
assert entry['movie_name'] == 'Inside Out', 'wrong movie name'
assert entry['trakt_collected'] == True, 'movie should be marked as collected'
def test_trakt_show_watched_progress(self, execute_task):
task = execute_task('test_trakt_show_watched_progress')
assert len(task.accepted) == 1, 'One show should have been accepted as it is watched on Trakt profile'
entry = task.accepted[0]
assert entry['trakt_series_name'] == 'Chuck', 'wrong series was accepted'
assert entry['trakt_watched'] == True, 'the whole series should be marked as watched'
def test_trakt_show_collected_progress(self, execute_task):
task = execute_task('test_trakt_show_collected_progress')
assert len(task.accepted) == 1, 'One show should have been accepted as it is collected on Trakt profile'
entry = task.accepted[0]
assert entry['trakt_series_name'] == 'White Collar', 'wrong series was accepted'
assert entry['trakt_collected'] == True, 'the whole series should be marked as collected'
@pytest.mark.online
class TestTraktMovieLookup(object):
config = """
templates:
global:
trakt_lookup: yes
tasks:
test_lookup_sources:
mock:
- title: trakt id
trakt_movie_id: 481
- title: tmdb id
tmdb_id: 603
- title: imdb id
imdb_id: tt0133093
- title: slug
trakt_movie_slug: the-matrix-1999
- title: movie_name and movie_year
movie_name: The Matrix
movie_year: 1999
- title: The Matrix (1999)
test_lookup_actors:
mock:
- title: The Matrix (1999)
test_search_results:
mock:
- title: harry.potter.and.the.philosopher's.stone.720p.hdtv-flexget
test_search_results2:
mock:
- title: harry.potter.and.the.philosopher's.stone
"""
def test_lookup_sources(self, execute_task):
task = execute_task('test_lookup_sources')
for e in task.all_entries:
assert e['movie_name'] == 'The Matrix', 'looking up based on %s failed' % e['title']
def test_search_results(self, execute_task):
task = execute_task('test_search_results')
entry = task.entries[0]
assert entry['movie_name'].lower() == 'Harry Potter and The Philosopher\'s Stone'.lower(), 'lookup failed'
with Session() as session:
assert len(session.query(TraktMovieSearchResult).all()) == 1, 'should have added one movie to search result'
# change the search query
session.query(TraktMovieSearchResult).update({'search': "harry.potter.and.the.philosopher's"})
session.commit()
lookupargs = {'title': "harry.potter.and.the.philosopher's"}
movie = ApiTrakt.lookup_movie(**lookupargs)
assert movie.imdb_id == entry['imdb_id']
assert movie.title.lower() == entry['movie_name'].lower()
def test_lookup_actors(self, execute_task):
task = execute_task('test_lookup_actors')
assert len(task.entries) == 1
entry = task.entries[0]
actors = ['Keanu Reeves',
'Laurence Fishburne',
'Carrie-Anne Moss',
'Hugo Weaving',
'Gloria Foster',
'Joe Pantoliano',
'Marcus Chong',
'Julian Arahanga',
'Matt Doran',
'Belinda McClory',
'Anthony Ray Parker',
'Paul Goddard',
'Robert Taylor',
'David Aston',
'Marc Aden',
'Ada Nicodemou',
'Deni Gordon',
'Rowan Witt',
'Bill Young',
'Eleanor Witt',
'Tamara Brown',
'Janaya Pender',
'Adryn White',
'Natalie Tjen',
'David O\'Connor',
'Jeremy Ball',
'Fiona Johnson',
'Harry Lawrence',
'Steve Dodd',
'Luke Quinton',
'Lawrence Woodward',
'Michael Butcher',
'Bernard Ledger',
'Robert Simper',
'Chris Pattinson',
'Nigel Harbach',
'Rana Morrison']
trakt_actors = entry['trakt_movie_actors'].values()
trakt_actors = [trakt_actor['name'] for trakt_actor in trakt_actors]
assert entry['movie_name'] == 'The Matrix', 'movie lookup failed'
assert set(trakt_actors) == set(actors), 'looking up actors for %s failed' % entry.get('title')
assert entry['trakt_movie_actors']['7134']['name'] == 'Keanu Reeves', 'trakt id mapping failed'
assert entry['trakt_movie_actors']['7134']['imdb_id'] == 'nm0000206', 'fetching imdb id for actor failed'
assert entry['trakt_movie_actors']['7134']['tmdb_id'] == '6384', 'fetching tmdb id for actor failed'
with Session() as session:
actor = session.query(TraktActor).filter(TraktActor.name == 'Keanu Reeves').first()
assert actor is not None, 'adding actor to actors table failed'
assert actor.imdb_id == 'nm0000206', 'saving imdb_id for actors in table failed'
assert actor.trakt_id == '7134', 'saving trakt_id for actors in table failed'
assert actor.tmdb_id == '6384', 'saving tmdb_id for actors table failed'
|
antivirtel/Flexget
|
tests/test_trakt.py
|
Python
|
mit
| 18,401
|
[
"CRYSTAL"
] |
38cd89026ec645e428a0ee92de212f7b5d11392ed0b375673c009a655fae83f4
|
"""
It is used to compile the web framework
"""
from __future__ import print_function
import os
import tempfile
import shutil
import subprocess
import gzip
import sys
from DIRAC import gLogger, gConfig, rootPath, S_OK, S_ERROR
from DIRAC.Core.Utilities.CFG import CFG
__RCSID__ = "$Id$"
class WebAppCompiler(object):
def __init__(self, params):
self.__params = params
self.__extVersion = '4.2.1.883'
self.__extDir = 'extjs' # this directory will contain all the resources required by ExtJS
self.__sdkDir = params.extjspath if self.__params.extjspath is not None else '/opt/dirac/extjs/ext-4.2.1.883'
self.__webAppPath = os.path.join(self.__params.destination, 'WebAppDIRAC', 'WebApp')
self.__staticPaths = [os.path.join(self.__webAppPath, 'static')]
if self.__params.name != 'WebAppDIRAC':
self.__staticPaths.append(os.path.join(self.__params.destination, self.__params.name, 'WebApp', 'static'))
self.__classPaths = [os.path.join(self.__webAppPath, *p) for p in (("static", "core", "js", "utils"),
("static", "core", "js", "core"))]
self.__extjsDirsToCopy = []
self.__extjsFilesToCopy = []
if self.__extVersion in self.__sdkDir:
self.__classPaths.append(os.path.join(os.path.dirname(self.__sdkDir), "examples", "ux"))
self.__classPaths.append(os.path.join(os.path.dirname(self.__sdkDir), "examples", "ux", "form"))
self.__sdkPath = os.path.join(self.__sdkDir, "src")
self.__extjsDirsToCopy.append(os.path.join(os.path.dirname(self.__sdkDir), "resources"))
self.__extjsFilesToCopy.append(os.path.join(os.path.dirname(self.__sdkDir), "ext-all-dev.js"))
else:
self.__classPaths.append(os.path.join(os.path.dirname(self.__sdkDir), "build/ext-all-debug.js"))
self.__classPaths.append(os.path.join(os.path.dirname(self.__sdkDir), "build/packages/ux/classic/ux-debug.js"))
self.__classPaths.append(
os.path.join(os.path.dirname(self.__sdkDir), "build/packages/charts/classic/charts-debug.js"))
self.__sdkPath = self.__sdkDir
self.__extjsDirsToCopy.append(os.path.join(os.path.dirname(self.__sdkDir), "build/packages"))
self.__extjsDirsToCopy.append(os.path.join(os.path.dirname(self.__sdkDir), "build/classic"))
self.__extjsFilesToCopy.append(os.path.join(os.path.dirname(self.__sdkDir), "build/ext-all.js"))
self.__extjsFilesToCopy.append(os.path.join(os.path.dirname(self.__sdkDir), "build/ext-all-debug.js"))
self.__extjsFilesToCopy.append(
os.path.join(os.path.dirname(self.__sdkDir), "build/packages/ux/classic/ux-debug.js"))
self.__debugFlag = str(gLogger.getLevel() in ('DEBUG', 'VERBOSE', 'INFO')).lower()
self.__compileTemplate = os.path.join(self.__params.destination, 'WebAppDIRAC', "Lib", "CompileTemplates")
    # this location will be used if sencha cmd is not available in the PATH
self.__senchacmddir = os.path.join(rootPath, "sbin", "Sencha", "Cmd")
self.__senchaVersion = "v6.5.0.180"
self.__appDependency = {}
self.__dependencySection = "Dependencies"
def __deployResources(self):
"""
    This method copies the required files and directories to the appropriate place
"""
extjsDirPath = os.path.join(self.__webAppPath, 'static', self.__extDir)
if not os.path.exists(extjsDirPath):
try:
os.mkdir(extjsDirPath)
except OSError as e:
gLogger.error("Can not create release extjs", repr(e))
return S_ERROR("Can not create release extjs" + repr(e))
for dirSrc in self.__extjsDirsToCopy:
try:
shutil.copytree(dirSrc, os.path.join(extjsDirPath, os.path.split(dirSrc)[1]))
except OSError as e:
if e.errno != 17:
errorMsg = "Can not copy %s directory to %s: %s" % (
dirSrc, os.path.join(extjsDirPath, os.path.split(dirSrc)[1]), repr(e))
gLogger.error(errorMsg)
return S_ERROR(errorMsg)
else:
gLogger.warn("%s directory is already exists. It will be not overwritten!" %
os.path.join(extjsDirPath, os.path.split(dirSrc)[1]))
for filePath in self.__extjsFilesToCopy:
try:
shutil.copy(filePath, extjsDirPath)
except (IOError, OSError) as e:
errorMsg = "Can not copy %s file to %s: %s" % (filePath, extjsDirPath, repr(e))
gLogger.warn(errorMsg)
return S_OK()
  def __writeINFile(self, tplName, extra=None):
"""
It creates a temporary file using different templates. For example: /tmp/zmathe/tmp4sibR5.compilejs.app.tpl
This is required to compile the web framework.
    :param str tplName: the name of the template
    :param dict extra: token/value pairs (e.g. the application location) to substitute into the template
:return: the location of the file
"""
inTpl = os.path.join(self.__compileTemplate, tplName)
try:
with open(inTpl) as infd:
data = infd.read()
except IOError:
return S_ERROR("%s does not exist" % inTpl)
data = data.replace("%EXT_VERSION%", self.__extVersion)
if extra:
for k in extra:
data = data.replace("%%%s%%" % k.upper(), extra[k])
outfd, filepath = tempfile.mkstemp(".compilejs.%s" % tplName)
os.write(outfd, data)
os.close(outfd)
return S_OK(filepath)
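  # Illustrative sketch of the substitution performed above (values are
  # hypothetical): with extra={'APP_LOCATION': 'DIRAC.Accounting.classes.Accounting'}
  # every "%EXT_VERSION%" token in the template becomes "4.2.1.883" and every
  # "%APP_LOCATION%" token becomes "DIRAC.Accounting.classes.Accounting".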
def __cmd(self, cmd):
"""
    This is used to execute a command
    :param list cmd: sencha command which will be executed
"""
env = {}
for k in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH'):
if k in os.environ:
env[k] = os.environ[k]
os.environ.pop(k)
gLogger.verbose("Command is: %s" % " ".join(cmd))
try:
result = subprocess.call(cmd)
except OSError as e:
      message = 'Command does not exist: %s -> %s' % (','.join(cmd), e)
gLogger.error(message)
return S_ERROR(message)
for k in env:
os.environ[k] = env[k]
return result
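  # Note on the return contract of __cmd: on success subprocess.call gives back
  # the process exit status (0 for success), while a missing executable yields
  # an S_ERROR dict. Both non-zero integers and the (truthy) dict are treated
  # as failures by the callers, which test "if self.__cmd(cmd): ...".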
def __compileApp(self, extPath, extName, appName, extClassPath=""):
"""
It compiles an application
:param str extPath: directory full path, which contains the applications
for example: /tmp/zmathe/tmpFxr5LzDiracDist/WebAppDIRAC/WebApp/static/DIRA
:param str extName: the name of the application for example: DIRAC or LHCbDIRAC, etc
:param str appName: the name of the application for example: Accounting
:param str extClassPath: if we compile an extension, we can provide the class path of the base class
"""
result = self.__writeINFile("app.tpl", {'APP_LOCATION': '%s.%s.classes.%s' % (extName, appName, appName)})
if not result['OK']:
return result
inFile = result['Value']
buildDir = os.path.join(extPath, appName, 'build')
try:
shutil.rmtree(buildDir)
except OSError:
pass
if not os.path.isdir(buildDir):
try:
os.makedirs(buildDir)
      except OSError as excp:
        return S_ERROR("Can't create build dir %s" % excp)
outFile = os.path.join(buildDir, "index.html")
compressedJsFile = os.path.join(buildDir, appName + '.js')
classPath = list(self.__classPaths)
excludePackage = ",%s.*" % extName
if extClassPath != "":
classPath.append(extClassPath)
excludePackage = ",DIRAC.*,%s.*" % extName
classPath.append(os.path.join(extPath, appName, "classes"))
cmd = ['sencha', '-sdk', self.__sdkPath, 'compile', '-classpath=%s' % ",".join(classPath),
'page', '-name=page', '-input-file', inFile, '-out', outFile, 'and',
'restore', 'page', 'and', 'exclude', '-not', '-namespace', 'Ext.dirac.*%s' % excludePackage, 'and',
'concat', '-yui', compressedJsFile]
if self.__cmd(cmd):
return S_ERROR("Error compiling %s.%s" % (extName, appName))
return S_OK()
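  # For orientation, the command assembled above expands roughly to
  # (paths shortened, values illustrative):
  #   sencha -sdk <sdkPath> compile -classpath=<corePaths>,<appClasses> \
  #     page -name=page -input-file <tmp>.compilejs.app.tpl -out <app>/build/index.html and \
  #     restore page and exclude -not -namespace Ext.dirac.*,<extName>.* and \
  #     concat -yui <app>/build/<appName>.js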
  def __zip(self, staticPath, stack=""):
    """
    It compresses the compiled applications
    """
    c = 0
    spinner = "|/-\\"
    for entry in os.listdir(staticPath):
      n = stack + spinner[c % len(spinner)]
      if entry[-3:] == ".gz":
        continue
      ePath = os.path.join(staticPath, entry)
      if os.path.isdir(ePath):
        self.__zip(ePath, n)
        continue
      zipPath = "%s.gz" % ePath
      if os.path.isfile(zipPath):
        if os.stat(zipPath).st_mtime > os.stat(ePath).st_mtime:
          # the compressed copy is already up to date
          continue
      print("%s%s\r" % (n, " " * (20 - len(n))), end=' ')
      c += 1
      # stream the plain file into the gzip archive in 8 kB chunks
      gzFile = gzip.open(zipPath, "wb", 9)
      with open(ePath, "rb") as plainFile:
        buf = plainFile.read(8192)
        while buf:
          gzFile.write(buf)
          buf = plainFile.read(8192)
      gzFile.close()
def run(self):
"""
This compiles the web framework
"""
    # if sencha cmd is not installed, this will raise an OSError
self.__checkSenchacmd()
retVal = self.__deployResources()
if not retVal['OK']:
return retVal
# we are compiling an extension of WebAppDIRAC
if self.__params.name != 'WebAppDIRAC':
self.__appDependency.update(self.getAppDependencies())
staticPath = os.path.join(self.__webAppPath, "static")
gLogger.notice("Compiling core: %s" % staticPath)
result = self.__writeINFile("core.tpl")
if not result['OK']:
return result
inFile = result['Value']
buildDir = os.path.join(staticPath, "core", "build")
try:
shutil.rmtree(buildDir)
except OSError:
pass
outFile = os.path.join(staticPath, "core", "build", "index.html")
gLogger.verbose(" IN file written to %s" % inFile)
cmd = ['sencha', '-sdk', self.__sdkPath, 'compile', '-classpath=%s' % ",".join(self.__classPaths),
'page', '-yui', '-input-file', inFile, '-out', outFile]
if self.__cmd(cmd):
gLogger.error("Error compiling JS")
return S_ERROR("Failed compiling core")
try:
os.unlink(inFile)
except IOError:
pass
for staticPath in self.__staticPaths:
gLogger.notice("Looing into %s" % staticPath)
extDirectoryContent = os.listdir(staticPath)
if len(extDirectoryContent) == 0:
return S_ERROR("The extension directory is empty:" + str(staticPath))
else:
extNames = [ext for ext in extDirectoryContent if 'DIRAC' in ext]
if len(extNames) > 1:
extNames.remove('DIRAC')
extName = extNames[-1]
gLogger.notice("Detected extension:%s" % extName)
extPath = os.path.join(staticPath, extName)
if not os.path.isdir(extPath):
continue
gLogger.notice("Exploring %s" % extName)
for appName in os.listdir(extPath):
expectedJS = os.path.join(extPath, appName, "classes", "%s.js" % appName)
if not os.path.isfile(expectedJS):
continue
classPath = self.__getClasspath(extName, appName)
gLogger.notice("Trying to compile %s.%s.classes.%s CLASSPATH=%s" % (extName, appName, appName, classPath))
result = self.__compileApp(extPath, extName, appName, classPath)
if not result['OK']:
return result
gLogger.notice("Zipping static files")
self.__zip(staticPath)
gLogger.notice("Done")
return S_OK()
def __getClasspath(self, extName, appName):
classPath = ''
dependency = self.__appDependency.get("%s.%s" % (extName, appName), "")
if dependency != "":
depPath = dependency.split(".")
for staticPath in self.__staticPaths:
expectedJS = os.path.join(staticPath, depPath[0], depPath[1], "classes")
gLogger.notice(expectedJS)
if not os.path.isdir(expectedJS):
continue
classPath = expectedJS
return classPath
  def __checkSenchacmd(self):
    """
    Before we start the compilation, the availability of sencha cmd must be checked
    """
    # __cmd returns an S_ERROR dict when the executable can not be found
    result = self.__cmd(["sencha"])
    if isinstance(result, dict) and not result['OK']:
      # sencha is not in the PATH; fall back to the locally installed Sencha Cmd
      path = os.path.join(self.__senchacmddir, self.__senchaVersion)
      if os.path.exists(path):
        sys.path.append(path)
        syspath = os.environ['PATH']
        os.environ['PATH'] = path + os.pathsep + syspath
      else:
        raise OSError("sencha cmd is not installed!")
def getAppDependencies(self):
"""
Generate the dependency dictionary
:return: Dict
"""
if self.__params.name != 'WebAppDIRAC':
self._loadWebAppCFGFiles(self.__params.name)
dependency = {}
fullName = "%s/%s" % ("/WebApp", self.__dependencySection)
result = gConfig.getOptions(fullName)
if not result['OK']:
gLogger.error(result['Message'])
return dependency
optionsList = result['Value']
for opName in optionsList:
opVal = gConfig.getValue("%s/%s" % (fullName, opName))
dependency[opName] = opVal
return dependency
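  # The returned mapping has the form {"<extName>.<appName>": "<baseExt>.<baseApp>"},
  # e.g. (hypothetical values) {"LHCbWebDIRAC.LHCbAccounting": "DIRAC.Accounting"};
  # __getClasspath later resolves the value to that application's "classes" directory.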
def _loadWebAppCFGFiles(self, extension):
"""
Load WebApp/web.cfg definitions
    :param str extension: the module name of the extension of WebAppDIRAC, for example: LHCbWebDIRAC
"""
exts = [extension, "WebAppDIRAC"]
webCFG = CFG()
for modName in reversed(exts):
cfgPath = os.path.join(self.__params.destination, "%s/WebApp" % modName, "web.cfg")
if not os.path.isfile(cfgPath):
gLogger.verbose("Web configuration file %s does not exists!" % cfgPath)
continue
try:
modCFG = CFG().loadFromFile(cfgPath)
      except Exception as excp:
gLogger.error("Could not load %s: %s" % (cfgPath, excp))
continue
gLogger.verbose("Loaded %s" % cfgPath)
expl = ["/WebApp"]
while len(expl):
current = expl.pop(0)
if not modCFG.isSection(current):
continue
if modCFG.getOption("%s/AbsoluteDefinition" % current, False):
gLogger.verbose("%s:%s is an absolute definition" % (modName, current))
          try:
            webCFG.deleteKey(current)
          except Exception:
            # the key may not exist in the merged CFG yet
            pass
modCFG.deleteKey("%s/AbsoluteDefinition" % current)
else:
for sec in modCFG[current].listSections():
expl.append("%s/%s" % (current, sec))
# Add the modCFG
webCFG = webCFG.mergeWith(modCFG)
gConfig.loadCFG(webCFG)
|
chaen/DIRAC
|
FrameworkSystem/Client/WebAppCompiler.py
|
Python
|
gpl-3.0
| 14,043
|
[
"DIRAC"
] |
a448dd50fa6c7292e121c59ceededd464c039df5b829241d4d8d49ba8417bec5
|
#!/usr/bin/python
import os
import sys
import cPickle
import argparse
import traceback
from urlparse import urlparse
from collections import defaultdict
import random
H2 = 'h2'
H1 = 'http/1.1'
SPDY = 'spdy'
PROTOCOLS = { H2, H1, SPDY }
class Fetch(object):
def __init__(self, url, request_time, new_connection, push, size, ident, prior, code):
self.url = url
self.request_time = request_time
self.new_connection = new_connection
self.push = push
self.size = size
self.ident = ident
self.prior = prior
self.code = code
self.response_time = None
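# The parser below consumes pageloader log lines of the shape
# "[<seconds>s] <KEYWORD>=<value> ...", for example (illustrative only):
#   [0.123s] REQUEST=https://example.com/index.html
#   [0.456s] RESPONSE=https://example.com/index.html SIZE=1024
# Only the keywords handled in parseWebPageFetch (TCP_CONNECTION, PUSH,
# REQUEST, REDIRECT, RESPONSE, PROTOCOL_NEGOTIATE_FAILED, CODE, VISITED)
# are acted upon; everything else is skipped.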
def parseWebPageFetch(key, main_url, output, protocol):
objs = {}
urlToIdent = {}
h1Connections = defaultdict(int) # State of TCP connections for a domain
ident = 0
last_request = None
last_response = None
protocol_fail = False
# Go line by line through the pageloader output
for line in output.split('\n'):
chunks = line.split()
# Look for keyword lines only
if len(chunks) < 2:
continue
try:
time = float(chunks[0].strip('[s]'))
        except ValueError:  # not a timestamped keyword line
continue
ident += 1
if chunks[1].startswith('TCP_CONNECTION='): # Last request caused a new TCP connection to be created
objs[last_request].new_connection = True
elif chunks[1].startswith('PUSH='): # Accepted a push request (this never happens)
url = getURL(chunks[1].split('=', 1)[1])
objs[ident] = Fetch(url, time, False, True, None, ident, (objs[last_response].ident if last_response else None), None)
last_request = ident
urlToIdent[url] = ident
elif chunks[1].startswith('REQUEST=') or chunks[1].startswith('REDIRECT='): # Making a new request (possibly due to a redirect)
url = getURL(chunks[1].split('=', 1)[1])
objs[ident] = Fetch(url, time, False, False, None, ident, (objs[last_response].ident if last_response else None), None)
last_request = ident
urlToIdent[url] = ident
# if protocol == H1: # H1 connections must be handled in post because there is no event from the h1 library
# domain = urlparse(url).netloc # This implementation assumes unlimited connections
# if h1Connections[domain] == 0:
# objs[ident].new_connection = True
# else:
# objs[ident].new_connection = False
# h1Connections[domain] -= 1
elif chunks[1].startswith('RESPONSE='): # Received a response
url = getURL(chunks[1].split('=', 1)[1])
last_response = urlToIdent[url]
objs[last_response].size = chunks[2].split('=')[1]
objs[last_response].response_time = time
# if protocol == H1: # Free TCP connection now available
# domain = urlparse(url).netloc
# h1Connections[domain] += 1
        elif chunks[1] == 'PROTOCOL_NEGOTIATE_FAILED': # There has been a protocol error on one of the connections.
            protocol_fail = True # There is no indication given as to which, so we are left to guess
elif chunks[1].startswith('CODE='): # Response code value for the last response
objs[last_response].code = chunks[1].split('=')[1]
elif chunks[1] == 'VISITED' and args.visit:
break # Terminate once the browser determines the page to be 'loaded'
for obj in sorted(objs.itervalues(), key = lambda v: v.ident):
if not obj.code and protocol != H1 and (obj.url.startswith('http://') or protocol_fail):
obj.code = 'not_supported' # The object was never fetched. Likely cause is that it cannot be loaded over this protocol
fetch_time = None
if obj.response_time: # Calculate the time to fetch the object
fetch_time = obj.response_time - obj.request_time
            if obj.prior: # If there was a prior object, then calculate the time from when it was received (i.e., include processing time)
fetch_time = obj.response_time - objs[obj.prior].response_time
prior = objs[obj.prior].url if obj.prior else None
args.outfile.write(key + ' ' + main_url + ' ' + protocol + ' ' + obj.url + ' ' + str(obj.new_connection) + ' ' +
str(obj.push) + ' ' + str(obj.size) + ' ' + str(fetch_time) + ' ' + str(prior) + ' ' + str(obj.code) + '\n')
def getURL(uri):
return uri.rstrip('/') # Sometimes present, sometimes not. Make consistent
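# e.g. getURL('http://example.com/path/') -> 'http://example.com/path'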
if __name__ == "__main__":
# set up command line args
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\
description='Read phase3 log files')
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument('-u', '--url', default=None, help='url of the website')
parser.add_argument('-p', '--protocol', default=None, help='protocol used')
parser.add_argument('-v', '--visit', action='store_true', default=False, help='Terminate on visited event')
args = parser.parse_args()
count = random.randint(0, 32768)
output = ''
for line in args.infile:
output += line
parseWebPageFetch('fetch'+str(count), args.url, output, args.protocol)
|
scoky/node-http2
|
example/phase3_matteo.py
|
Python
|
mit
| 5,484
|
[
"VisIt"
] |
b448ee833b993db512b97a5996650e2f3e30ab23709742d7ddabfdd577e68b89
|
from functions.functionProperty import FunctionPropertySQLTestCase
class FunctionPropertyTestCase(FunctionPropertySQLTestCase):
"""
@db_name functionproperty
@optimizer_mode on
@tags ORCA
"""
sql_dir = 'sql/'
ans_dir = 'expected/'
out_dir = 'output/'
|
lintzc/gpdb
|
src/test/tinc/tincrepo/functions/functionProperty/test_functionproperty.py
|
Python
|
apache-2.0
| 285
|
[
"ORCA"
] |
1f3a2905fa5f4c14fb14474031db8487ffa0ebecf15927a54fbd942f38c32de8
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**********************************************
espressopp.integrator.LangevinThermostatHybrid
**********************************************
As LangevinThermostat, but for use in AdResS systems, to allow the application of different thermostat friction constants (:math:`\gamma`) to different AdResS regions. Uses three values of :math:`\gamma`, one for the atomistic region, one for the hybrid region, and one for the coarse-grained region.
>>> # create FixedTupleList object
>>> ftpl = espressopp.FixedTupleListAdress(system.storage)
>>> ftpl.addTuples(tuples)
>>> system.storage.setFixedTuplesAdress(ftpl)
>>>
>>> system.storage.decompose()
>>>
>>> # create Langevin thermostat
>>> thermostat = espressopp.integrator.LangevinThermostatHybrid(system,ftpl)
>>>
>>> # set Langevin friction constants
>>> thermostat.gamma = 0.0 # units = 1/timeunit
>>> print "# gamma for atomistic region for langevin thermostat = ",thermostat.gamma
>>> thermostat.gammahy = 10.0 # units = 1/timeunit
>>> print "# gamma for hybrid region for langevin thermostat = ",thermostat.gammahy
>>> thermostat.gammacg = 10.0 # units = 1/timeunit
>>> print "# gamma for coarse-grained region for langevin thermostat = ",thermostat.gammacg
>>>
>>> # set temperature of thermostat
>>> thermostat.temperature = kBT
>>> # kBT is a float with the value of temperature in reduced units, i.e. temperature * Boltzmann's constant in appropriate units
No need to include the line
>>> thermostat.adress = True
as is necessary in the case of the basic LangevinThermostat, because LangevinThermostatHybrid is only ever used in AdResS systems
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_LangevinThermostatHybrid
class LangevinThermostatHybridLocal(ExtensionLocal, integrator_LangevinThermostatHybrid):
def __init__(self, system, fixedtuplelist):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_LangevinThermostatHybrid, system,fixedtuplelist)
if pmi.isController :
class LangevinThermostatHybrid(Extension):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.integrator.LangevinThermostatHybridLocal',
pmiproperty = [ 'gamma', 'gammahy','gammacg','temperature' ]
)
|
govarguz/espressopp
|
src/integrator/LangevinThermostatHybrid.py
|
Python
|
gpl-3.0
| 3,363
|
[
"ESPResSo"
] |
78aab5cdbe3cf189b7fee38f6c66b44ba99d4fd70a9a5aa46a1386230cb386e2
|
"""
history.py
Summary:
This is the core history file module for the rvic model.
The core of the module is the Tape class. The basic procedure is as
follows:
- initialization: sets tape options, determines filenames, etc.
- update: method that incorporates new fluxes into the history tape.
- __next_update_out_data: method to determine when to update the
out_data container
- __next_write_out_data: method to determine when to write the out_data
container
- finish: method to close all remaining history tapes.
"""
import os
import numpy as np
from netCDF4 import Dataset, date2num, num2date, stringtochar
from datetime import datetime
from time_utility import ord_to_datetime
from logging import getLogger
from log import LOG_NAME
from share import SECSPERDAY, HOURSPERDAY, TIMEUNITS, NC_INT, NC_FLOAT, NC_CHAR
from share import NC_DOUBLE, WATERDENSITY, MONTHSPERYEAR
import share
# -------------------------------------------------------------------- #
# create logger
log = getLogger(LOG_NAME)
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# RVIC History File Object
class Tape(object):
""" History Tape Object"""
# ---------------------------------------------------------------- #
# Init
def __init__(self, time_ord, caseid, Rvar, tape_num=0,
fincl=['streamflow'], mfilt=1, ndens=2, nhtfrq=0,
avgflag='A', units='kg m-2 s-1',
file_format='NETCDF4_CLASSIC', outtype='grid',
grid_lons=False, grid_lats=False, grid_area=None, out_dir='.',
calendar=None, glob_ats=None, zlib=True, complevel=4,
least_significant_digit=None):
self._tape_num = tape_num
self._time_ord = time_ord # Days since basetime
self._caseid = caseid # Case ID and prefix for outfiles
self._fincl = list(fincl) # Fields to include in history file
self._mfilt = mfilt # Maximum number of time samples
self._ndens = ndens
if self._ndens == 1: # Output file precision
self._ncprec = NC_FLOAT
else:
self._ncprec = NC_DOUBLE
self._nhtfrq = nhtfrq # Write frequency
self._avgflag = avgflag # Average Flag (A,I,X,M)
self._outtype = outtype # Outfile type (grid, array)
self._count = 0
self.files_count = 0
self._file_format = file_format
self._calendar = calendar
self._out_dir = out_dir
self._glob_ats = glob_ats
self.__get_rvar(Rvar) # Get the initial Rvar fields
self._grid_shape = grid_area.shape
self._out_data = {}
# ------------------------------------------------------------ #
# calculate the step size for each out_data timestep (units=days)
if self._nhtfrq > 0:
# If some number of timesteps
self._out_data_stepsize = self._nhtfrq * self._dt / SECSPERDAY
elif self._nhtfrq < 0:
# If some number hours
self._out_data_stepsize = -1 * self._nhtfrq / HOURSPERDAY
else:
# If monthly
self._out_data_stepsize = None # varies by month
log.debug('_out_data_stepsize: %s', self._out_data_stepsize)
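        # Worked example (illustrative): nhtfrq=-6 gives a step of 6/24 = 0.25
        # days, while nhtfrq=4 with dt=3600 s gives 4*3600/86400 ~ 0.167 days.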
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Get Grid Lons/Lats if outtype is grid
if outtype == 'grid':
self._out_data_shape = self._grid_shape
if type(grid_lons) == np.ndarray and type(grid_lats) == np.ndarray:
self._grid_lons = grid_lons
self._grid_lats = grid_lats
else:
raise ValueError('Must include grid lons / lats if '
'outtype == grid')
else:
self._out_data_shape = (self._num_outlets, )
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Get units multiplier (size of noutlets)
self._units = units
if units in ['kg/m2/s', 'kg m-2 s-1', 'kg m^-2 s^-1',
'kg*m-2*s-1', 'kg s-1 m-2']:
self._units_mult = np.ones_like(self._outlet_y_ind,
dtype=np.float64)
elif units in ['m3/s', 'm^3/s', 'm3 s-1']:
# kg/m2/s --> m3/s
self._units_mult = grid_area[self._outlet_y_ind,
self._outlet_x_ind]
self._units_mult /= WATERDENSITY
elif units in ['mm/day', 'mm d-1', 'mm d^-1', 'mm/day']:
# kg/m2/s --> mm/day over basin area
self._units_mult = grid_area[self._outlet_y_ind,
self._outlet_x_ind]
self._units_mult *= SECSPERDAY
self._units_mult /= WATERDENSITY
self._units_mult /= self._outlet_upstream_area
elif units in ['gal/day', 'gpd', 'gal d-1']:
self._units_mult = grid_area[self._outlet_y_ind,
self._outlet_x_ind]
self._units_mult /= WATERDENSITY
self._units_mult *= 2.28E7
elif units in ['cfs', 'ft^3 s-1', 'f3/s']:
self._units_mult = grid_area[self._outlet_y_ind,
self._outlet_x_ind]
self._units_mult /= WATERDENSITY
self._units_mult *= 35.3
elif units in ['acre-ft/d']:
self._units_mult = grid_area[self._outlet_y_ind,
self._outlet_x_ind]
self._units_mult /= WATERDENSITY
self._units_mult *= 70.0
else:
raise ValueError('{0} is not a valid units string'.format(units))
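        # Worked example (illustrative, assuming WATERDENSITY = 1000 kg m-3):
        # converting kg m-2 s-1 to m3 s-1 for a 1.0e6 m2 grid cell multiplies
        # by the cell area and divides by the water density, so a flux of
        # 1 kg m-2 s-1 becomes 1.0e6 / 1000 = 1000 m3 s-1.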
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# netCDF variable options
self.ncvaropts = {'zlib': zlib,
'complevel': complevel,
'least_significant_digit': least_significant_digit}
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# get current timestamp
self._timestamp = ord_to_datetime(self._time_ord, TIMEUNITS,
self._calendar)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Initialize the temporary history fields
self._temp_data = {}
for field in self._fincl:
self._temp_data[field] = np.zeros(self._num_outlets,
dtype=np.float64)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Determine the format of the output filename
if self._avgflag == 'I':
self._fname_format = os.path.join(out_dir,
"%s.rvic.h%s%s.%%Y-%%m-%%d-%%H-%%M-%%S.nc" % (self._caseid, self._tape_num, self._avgflag.lower()))
else:
if self._nhtfrq == 0:
self._fname_format = os.path.join(out_dir,
"%s.rvic.h%s%s.%%Y-%%m.nc" % (self._caseid, self._tape_num, self._avgflag.lower()))
elif (self._nhtfrq == -24) or (nhtfrq*self._dt == SECSPERDAY):
self._fname_format = os.path.join(out_dir,
"%s.rvic.h%s%s.%%Y-%%m-%%d.nc" % (self._caseid, self._tape_num, self._avgflag.lower()))
else:
self._fname_format = os.path.join(out_dir,
"%s.rvic.h%s%s.%%Y-%%m-%%d-%%H.nc" % (self._caseid, self._tape_num, self._avgflag.lower()))
self._rest_fname_format = os.path.join(out_dir,
"%s.rvic.rh%s.%%Y-%%m-%%d-%%H-%%M-%%S.nc" % (self._caseid, self._tape_num))
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Determine when the next write should be and initialize out_data
self.__next_write_out_data()
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Determine when the update of out_data should be
self.__next_update_out_data()
# ------------------------------------------------------------ #
log.debug(self.__repr__())
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Write a summary
def __str__(self):
return 'History Tape - {0}'.format(self.filename)
def __repr__(self):
parts = ['------- Summary of History Tape Settings -------',
'\t# caseid: {0}'.format(self._caseid),
'\t# fincl: {0}'.format(','.join(self._fincl)),
'\t# nhtfrq: {0}'.format(self._nhtfrq),
'\t# mfilt: {0}'.format(self._mfilt),
'\t# ncprec: {0}'.format(self._ncprec),
'\t# avgflag: {0}'.format(self._avgflag),
'\t# fname_format: {0}'.format(self._fname_format),
'\t# file_format: {0}'.format(self._file_format),
'\t# outtype: {0}'.format(self._outtype),
'\t# out_dir: {0}'.format(self._out_dir),
'\t# calendar: {0}'.format(self._calendar),
'\t# units: {0}'.format(self._units),
' ------- End of History Tape Settings -------']
return '\n'.join(parts)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Update the history tapes with new fluxes
def update(self, data2tape, time_ord):
""" Update the tape with new data"""
# ------------------------------------------------------------ #
# Check that the time_ord is in sync
if self._time_ord != time_ord:
raise ValueError('rout_var.time_ord does not match the time_ord '
'passed in by the convolution call')
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Get the next timestamp
self._time_ord += self._dt / SECSPERDAY
self._timestamp = ord_to_datetime(self._time_ord, TIMEUNITS,
calendar=self._calendar)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Advance the Count
self._count += 1
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Update the fields
for field in self._fincl:
tracer = 'LIQ'
log.debug('updating {0}'.format(field))
fdata = data2tape[field][tracer]
if self._avgflag == 'A':
self._temp_data[field] += fdata
elif self._avgflag == 'I':
if self._count == self._update_count:
self._temp_data[field] = fdata[:]
elif self._avgflag == 'X':
self._temp_data[field] = np.maximum(self._temp_data[field],
fdata)
elif self._avgflag == 'M':
self._temp_data[field] = np.minimum(self._temp_data[field],
fdata)
else:
raise ValueError('Average flag ({0}) does not match any of'
' (A,I,X,M)'.format(self._avgflag))
# ------------------------------------------------------------ #
# If count == _update_count, add to _out_data
# Average first, if necessary
if (self._avgflag == 'A' and self._count == self._update_count):
self.__average()
if self._count == self._update_count:
# move the data to the out_data structure
self.__update_out_data()
# Determine next update
self.__next_update_out_data()
# zero out temp_data
for field in self._fincl:
self._temp_data[field][:] = 0.0
# ------------------------------------------------------------ #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
def write_initial(self):
pass
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
def __next_write_out_data(self):
"""determine the maximum size of out_data"""
log.debug('determining size of out_data')
self._out_data_i = 0 # position counter for out_data array
# ------------------------------------------------------------ #
# b0 is first timestep of next period
# b1 is end of last timestep of next period
# time when out_data will start (now)
b0 = self._time_ord
self._begtime = b0
# determine time when out_data will be full
if self._mfilt == 'year':
if self._nhtfrq == 0:
mfilt = MONTHSPERYEAR
else:
t1 = datetime(self._timestamp.year + 1, 1, 1)
b1 = date2num(t1, TIMEUNITS, calendar=self._calendar)
# calculate the mfilt value
mfilt = int(round((b1 - b0) / self._out_data_stepsize))
elif self._mfilt == 'month':
if self._nhtfrq == 0:
mfilt = 1
else:
if self._timestamp.month == 12:
t1 = datetime(self._timestamp.year + 1, 2, 1)
else:
t1 = datetime(self._timestamp.year,
self._timestamp.month + 1, 1)
b1 = date2num(t1, TIMEUNITS, calendar=self._calendar)
# calculate the mfilt value
mfilt = int(round((b1 - b0) / self._out_data_stepsize))
elif self._mfilt == 'day':
if self._nhtfrq != 0:
b1 = b0 + 1.0
else:
                raise ValueError('Incompatible values for NHTFRQ and MFILT')
# calculate the mfilt value
mfilt = int(round((b1 - b0) / self._out_data_stepsize))
else:
mfilt = int(self._mfilt)
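        # Worked example (illustrative): with mfilt='month' and nhtfrq=-24
        # (daily samples, stepsize = 1 day) a 30-day month yields
        # mfilt = round(30 / 1) = 30 time slices in out_data.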
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
if mfilt < 1:
mfilt = 1
self._out_data_write = mfilt-1
self._out_times = np.empty(mfilt, dtype=np.float64)
if self._avgflag != 'I':
self._out_time_bnds = np.empty((mfilt, 2), dtype=np.float64)
shape = (mfilt, ) + self._out_data_shape
log.debug('out_data shape: %s', shape)
log.debug('_out_data_write: %s', self._out_data_write)
for field in self._fincl:
self._out_data[field] = np.zeros(shape, dtype=np.float64)
self._out_data_has_values = False
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# fill in out_data
def __update_out_data(self):
self._out_data_has_values = True
# ------------------------------------------------------------ #
# Update the _out_data fields
for field in self._fincl:
if self._outtype == 'grid':
# ---------------------------------------------------- #
# Grid the fields
self._out_data[field][self._out_data_i,
self._outlet_y_ind,
self._outlet_x_ind] = self._temp_data[field][:] * self._units_mult
# ---------------------------------------------------- #
else:
self._out_data[field][self._out_data_i, :] = self._temp_data[field] * self._units_mult
# ------------------------------------------------------------ #
self._out_times[self._out_data_i] = self._write_ord
if self._avgflag != 'I':
self._out_time_bnds[self._out_data_i, :] = self._time_bnds
# ------------------------------------------------------------ #
# if out_data is full, write
if self._out_data_i == self._out_data_write:
self.finish()
self._out_data_i = 0
# Determine when the next write should be and initialize out_data
self.__next_write_out_data()
else:
self._out_data_i += 1
log.debug('out_data counter is %s of %s', self._out_data_i,
self._out_data_write)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
def finish(self):
"""write out_data"""
log.debug('finishing tape %s', self._tape_num)
if self._out_data_has_values:
if self._outtype == 'grid':
self.__write_grid()
else:
self.__write_array()
self.files_count += 1
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Get import rvar fields
def __get_rvar(self, rvar):
""" Get the rvar Fields that are useful for writing output """
self._dt = rvar.unit_hydrograph_dt
self._num_outlets = rvar.n_outlets
self._outlet_decomp_ind = rvar.outlet_decomp_ind
self._outlet_x_ind = rvar.outlet_x_ind
self._outlet_y_ind = rvar.outlet_y_ind
self._outlet_lon = rvar.outlet_lon
self._outlet_lat = rvar.outlet_lat
self._outlet_name = rvar.outlet_name
self._outlet_upstream_area = rvar.outlet_upstream_area
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Determine next write time
def __next_update_out_data(self):
""" Determine the count for when the next write should occur """
# ------------------------------------------------------------ #
# If monthly, write at (YYYY,MM,1,0,0)
# b0 is first timestep of next period
# b1 is end of last timestep of next period
b0 = self._time_ord
self._begtime = b0
if self._nhtfrq == 0:
if self._timestamp.month == 12:
b1 = date2num(datetime(self._timestamp.year + 1, 2, 1),
TIMEUNITS, calendar=self._calendar)
else:
b1 = date2num(datetime(self._timestamp.year,
self._timestamp.month + 1, 1),
TIMEUNITS, calendar=self._calendar)
else:
b1 = b0 + self._out_data_stepsize
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Get the number of timesteps and datestamp for the next write
# next_ord is the ord_time when the write will happen
self._update_count = int(round((b1 - b0) / (self._dt / SECSPERDAY)))
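        # Worked example (illustrative): with hourly fluxes (dt = 3600 s) and
        # daily writes (nhtfrq = -24, so b1 - b0 = 1 day) the counter is
        # round(1 / (3600/86400)) = 24 updates per out_data slot.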
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Get next file names and timeord
if self._avgflag == 'I':
self._write_ord = b1
self.filename = num2date(b1, TIMEUNITS,
calendar=self._calendar).strftime(self._fname_format)
else:
self._time_bnds = np.array([[b0, b1]])
self._write_ord = np.average(self._time_bnds)
self.filename = num2date(b0, TIMEUNITS,
calendar=self._calendar).strftime(self._fname_format)
            self.rest_filename = num2date(b1, TIMEUNITS,
                                          calendar=self._calendar).strftime(self._rest_fname_format)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Set the count to zero
self._count = 0
# ------------------------------------------------------------ #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Average fields
def __average(self):
""" Take the average based on the number of accumulated timesteps """
for field in self._fincl:
self._temp_data[field] /= self._count
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Write grid style history file
def __write_grid(self):
""" Write history file """
# ------------------------------------------------------------ #
# Open file
f = Dataset(self.filename, 'w', self._file_format)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Time Variable
time = f.createDimension('time', None)
time = f.createVariable('time', self._ncprec, ('time',))
time[:] = self._out_times[:self._out_data_i+1]
for key, val in share.time.__dict__.iteritems():
if val:
setattr(time, key, val)
time.calendar = self._calendar
if self._avgflag != 'I':
nv = f.createDimension('nv', 2)
time.bounds = 'time_bnds'
time_bnds = f.createVariable('time_bnds', self._ncprec,
('time', 'nv',), **self.ncvaropts)
time_bnds[:, :] = self._out_time_bnds[:self._out_data_i+1]
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Setup Coordinate Variables
if self._grid_lons.ndim > 1:
coords = ('yc', 'xc',)
# Grid is not regular
xc = f.createDimension('xc', self._grid_lons.shape[1])
yc = f.createDimension('yc', self._grid_lons.shape[0])
xc = f.createVariable('xc', self._ncprec, coords, **self.ncvaropts)
yc = f.createVariable('yc', self._ncprec, coords, **self.ncvaropts)
xc[:, :] = self._grid_lons
yc[:, :] = self._grid_lats
for key, val in share.xc.__dict__.iteritems():
if val:
setattr(xc, key, val)
for key, val in share.yc.__dict__.iteritems():
if val:
setattr(yc, key, val)
else:
coords = ('lat', 'lon',)
lon = f.createDimension('lon', len(self._grid_lons))
lat = f.createDimension('lat', len(self._grid_lats))
lon = f.createVariable('lon', self._ncprec, ('lon',),
**self.ncvaropts)
lat = f.createVariable('lat', self._ncprec, ('lat',),
**self.ncvaropts)
lon[:] = self._grid_lons
lat[:] = self._grid_lats
for key, val in share.lon.__dict__.iteritems():
if val:
setattr(lon, key, val)
for key, val in share.lat.__dict__.iteritems():
if val:
setattr(lat, key, val)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Write Fields
tcoords = ('time',) + coords
for field in self._fincl:
var = f.createVariable(field, self._ncprec, tcoords,
**self.ncvaropts)
var[:, :] = self._out_data[field][:self._out_data_i+1]
for key, val in getattr(share, field).__dict__.iteritems():
if val:
setattr(var, key, val)
var.units = self._units
if self._grid_lons.ndim > 1:
var.coordinates = " ".join(coords)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# write global attributes
self._glob_ats.update()
for key, val in self._glob_ats.atts.iteritems():
if val:
setattr(f, key, val)
# ------------------------------------------------------------ #
f.close()
log.info('Finished writing %s' % self.filename)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Write array style history file
def __write_array(self):
""" Write history file """
# ------------------------------------------------------------ #
# Open file
f = Dataset(self.filename, 'w', self._file_format)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Time Variable
time = f.createDimension('time', None)
time = f.createVariable('time', self._ncprec, ('time',),
**self.ncvaropts)
time[:] = self._out_times[:self._out_data_i+1]
for key, val in share.time.__dict__.iteritems():
if val:
setattr(time, key, val)
time.calendar = self._calendar
if self._avgflag != 'I':
nv = f.createDimension('nv', 2)
time.bounds = 'time_bnds'
time_bnds = f.createVariable('time_bnds', self._ncprec,
('time', 'nv',), **self.ncvaropts)
time_bnds[:, :] = self._out_time_bnds[:self._out_data_i+1]
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Setup Coordinate Variables
coords = ('outlets',)
outlets = f.createDimension('outlets', self._num_outlets)
nocoords = coords + ('nc_chars',)
char_names = stringtochar(self._outlet_name)
chars = f.createDimension(nocoords[1], char_names.shape[1])
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Variables
outlet_lon = f.createVariable('lon', self._ncprec, coords,
**self.ncvaropts)
outlet_lat = f.createVariable('lat', self._ncprec, coords,
**self.ncvaropts)
outlet_x_ind = f.createVariable('outlet_x_ind', NC_INT, coords,
**self.ncvaropts)
outlet_y_ind = f.createVariable('outlet_y_ind', NC_INT, coords,
**self.ncvaropts)
outlet_decomp_ind = f.createVariable('outlet_decomp_ind', NC_INT,
coords, **self.ncvaropts)
onm = f.createVariable('outlet_name', NC_CHAR, nocoords,
**self.ncvaropts)
outlet_lon[:] = self._outlet_lon
outlet_lat[:] = self._outlet_lat
outlet_x_ind[:] = self._outlet_x_ind
outlet_y_ind[:] = self._outlet_y_ind
outlet_decomp_ind[:] = self._outlet_decomp_ind
onm[:, :] = char_names
for key, val in share.outlet_lon.__dict__.iteritems():
if val:
setattr(outlet_lon, key, val)
for key, val in share.outlet_lat.__dict__.iteritems():
if val:
setattr(outlet_lat, key, val)
for key, val in share.outlet_y_ind.__dict__.iteritems():
if val:
setattr(outlet_y_ind, key, val)
for key, val in share.outlet_x_ind.__dict__.iteritems():
if val:
setattr(outlet_x_ind, key, val)
for key, val in share.outlet_decomp_ind.__dict__.iteritems():
if val:
setattr(outlet_decomp_ind, key, val)
for key, val in share.outlet_name.__dict__.iteritems():
if val:
setattr(onm, key, val)
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# Write Fields
tcoords = ('time',) + coords
for field in self._fincl:
var = f.createVariable(field, self._ncprec, tcoords,
**self.ncvaropts)
var[:, :] = self._out_data[field][:self._out_data_i+1]
for key, val in getattr(share, field).__dict__.iteritems():
if val:
setattr(var, key, val)
var.units = self._units
# ------------------------------------------------------------ #
# ------------------------------------------------------------ #
# write global attributes
self._glob_ats.update()
for key, val in self._glob_ats.atts.iteritems():
if val:
setattr(f, key, val)
f.featureType = "timeSeries"
# ------------------------------------------------------------ #
f.close()
log.info('Finished writing %s', self.filename)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# write initial flux
# def write_restart(self):
# """Write history tape state, matches CESM-RVIC format"""
# """dims nx, ny, allrof, string_length, fname_lenp2, fname_len,
# len1, scalar, max_chars, max_nflds, max_flds"""
# # ------------------------------------------------------------ #
# # Open file
# f = Dataset(self.rest_filename, 'w', self._file_format)
# # ------------------------------------------------------------ #
# # ------------------------------------------------------------ #
# # Dimensions
# nx = f.createDimension('nx', self._grid_shape[1])
# ny = f.createDimension('ny', self._grid_shape[0])
# allrof = f.createDimension('allrof', )
# string_length = f.createDimension('string_length', 8)
# fname_lenp2 = f.createDimension('fname_lenp2', 34)
# fname_len = f.createDimension('fname_len', 32)
# len1 = f.createDimension('len1', 1)
# scalar = f.createDimension('scalar', 1)
# max_chars = f.createDimension('max_chars', 128)
# max_nflds = f.createDimension('max_nflds', 2)
# max_flds = f.createDimension('max_flds', 15)
# # ------------------------------------------------------------ #
# # ------------------------------------------------------------ #
# # Write Fields
# restvars = OrderedDict()
# restvars['nhtfrq'] = f.createVariable('nhtfrq', NC_INT, ('scalar', ))
# restvars['mfilt'] = f.createVariable('mfilt', NC_INT, ('scalar', ))
# restvars['ncprec'] = f.createVariable('ncprec', NC_INT, ('scalar', ))
# restvars['fincl'] = f.createVariable('fincl', NC_CHAR, ('max_flds', 'fname_lenp2',))
# restvars['fexcl'] = f.createVariable('fexcl', NC_CHAR, ('max_flds', 'fname_lenp2',))
# restvars['nflds'] = f.createVariable('nflds', NC_INT, ('scalar', ))
# restvars['ntimes'] = f.createVariable('ntimes', NC_INT, ('scalar', ))
# restvars['is_endhist'] = f.createVariable('is_endhist', NC_INT, ('scalar', ))
# restvars['begtime'] = f.createVariable('begtime', NC_DOUBLE, ('scalar', ))
# restvars['hpindex'] = f.createVariable('hpindex', NC_INT, ('max_nflds', ))
# restvars['avgflag'] = f.createVariable('avgflag', NC_CHAR, ('max_nflds', 'len1',))
# restvars['name'] = f.createVariable('name', NC_CHAR, ('max_nflds', 'fname_len', ))
# restvars['long_name'] = f.createVariable('long_name', NC_CHAR, ('max_nflds', 'max_chars', ))
# restvars['units'] = f.createVariable('units', NC_CHAR, ('max_nflds', 'max_chars', ))
# restvars['nhtfrq'][:] = self._nhtfrq
# restvars['mfilt'][:] = self._mfilt
# restvars['ncprec'][:] = self._ndens
# # restvars['fincl'][:, :] = self._fincl
# restvars['fexcl'][:, :] = self._fexcl
# restvars['nflds'][:] = len(self._fincl)
# restvars['ntimes'][:] = 1
# restvars['is_endhist'][:] = 0
# restvars['begtime'][:] = self._begtime
# restvars['hpindex'][:] = 'help'
# restvars['avgflag'][:, :] = self._avgflag
# restvars['name'][:, :] = self._fincl
# restvars['long_name'][:, :] = 'help'
# restvars['units'][:, :] = 'help'
    # for name, var in restvars.iteritems():
# ncvar = getattr(share, name)
# for key, val in ncvar.__dict__.iteritems():
# if val:
# setattr(var, key, val)
# # ------------------------------------------------------------ #
# # ------------------------------------------------------------ #
# # ------------------------------------------------------------ #
# # write global attributes
# self._glob_ats.update()
# for key, val in self._glob_ats.atts.iteritems():
# if val:
# setattr(f, key, val)
# f.title = "RVIC Restart History information, required to continue a simulation"
# f.comment = "This entire file NOT needed for drystart, startup, or branch simulations"
# f.featureType = "timeSeries"
# # ------------------------------------------------------------ #
# return self.filename, self.rest_filename
# # ---------------------------------------------------------------- #
# -------------------------------------------------------------------- #
|
bartnijssen/RVIC
|
rvic/core/history.py
|
Python
|
gpl-3.0
| 35,223
|
[
"NetCDF"
] |
c1a6ccd36962b10919e720896439af3c434d22dcec468a930e78935df7507900
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import numpy as np
from unittest import TestCase
from bigdl.nn.criterion import *
from bigdl.nn.layer import *
from pyspark.sql.types import *
from pyspark.sql.functions import col, udf
from zoo.common.nncontext import *
from zoo.feature.common import *
from zoo.orca.learn.bigdl import Estimator
from bigdl.optim.optimizer import Adam, SGD, ValidationSummary
from zoo.pipeline.api.keras import layers as ZLayer
from zoo.pipeline.api.keras.models import Model as ZModel
from zoo.orca.data import SparkXShards
from zoo.orca.learn.metrics import Accuracy, MAE
from zoo.orca.learn.trigger import EveryEpoch, SeveralIteration
from zoo.orca.data.pandas import read_csv
class TestEstimatorForKeras(TestCase):
def get_estimator_df(self):
self.sc = init_nncontext()
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
val_data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", ArrayType(DoubleType(), False), False)])
self.sqlContext = SQLContext(self.sc)
df = self.sqlContext.createDataFrame(data, schema)
val_df = self.sqlContext.createDataFrame(val_data, schema)
return df, val_df
def get_estimator_df2(self):
self.sc = init_nncontext()
data = self.sc.parallelize([
((0.0, 0.0), 1.0),
((1.0, 1.0), 2.0),
((2.0, 2.0), 1.0),
((3.0, 3.0), 2.0),
((4.0, 4.0), 1.0),
((5.0, 5.0), 2.0),
((6.0, 6.0), 1.0),
((7.0, 7.0), 2.0),
((8.0, 8.0), 1.0),
((9.0, 9.0), 2.0)
])
schema = StructType([
StructField("features", ArrayType(DoubleType(), False), False),
StructField("label", DoubleType(), False)])
self.sqlContext = SQLContext(self.sc)
df = self.sqlContext.createDataFrame(data, schema)
return df
def test_nnEstimator(self):
from zoo.pipeline.nnframes import NNModel
linear_model = Sequential().add(Linear(2, 2))
mse_criterion = MSECriterion()
df, _ = self.get_estimator_df()
est = Estimator.from_bigdl(model=linear_model, loss=mse_criterion, optimizer=Adam(),
feature_preprocessing=SeqToTensor([2]),
label_preprocessing=SeqToTensor([2]))
res0 = est.predict(df)
res0_c = res0.collect()
est.fit(df, 2, batch_size=4)
nn_model = NNModel(est.get_model(), feature_preprocessing=SeqToTensor([2]))
res1 = nn_model.transform(df)
res2 = est.predict(df)
res1_c = res1.collect()
res2_c = res2.collect()
assert type(res1).__name__ == 'DataFrame'
assert type(res2).__name__ == 'DataFrame'
assert len(res1_c) == len(res2_c)
for idx in range(len(res1_c)):
assert res1_c[idx]["prediction"] == res2_c[idx]["prediction"]
with tempfile.TemporaryDirectory() as tempdirname:
temp_path = os.path.join(tempdirname, "model")
est.save(temp_path)
est2 = Estimator.from_bigdl(model=linear_model, loss=mse_criterion)
est2.load(temp_path, optimizer=Adam(), loss=mse_criterion,
feature_preprocessing=SeqToTensor([2]), label_preprocessing=SeqToTensor([2]))
est2.set_constant_gradient_clipping(0.1, 1.2)
est2.clear_gradient_clipping()
res3 = est2.predict(df)
res3_c = res3.collect()
assert type(res3).__name__ == 'DataFrame'
assert len(res1_c) == len(res3_c)
for idx in range(len(res1_c)):
assert res1_c[idx]["prediction"] == res3_c[idx]["prediction"]
est2.fit(df, 4, batch_size=4)
data = self.sc.parallelize([
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0)),
((2.0, 1.0), (1.0, 2.0)),
((1.0, 2.0), (2.0, 1.0))])
data_shard = SparkXShards(data)
data_shard = data_shard.transform_shard(lambda feature_label_tuple: {
"x": np.stack([np.expand_dims(np.array(feature_label_tuple[0][0]), axis=0),
np.expand_dims(np.array(feature_label_tuple[0][1]), axis=0)], axis=1),
"y": np.stack([np.expand_dims(np.array(feature_label_tuple[1][0]), axis=0),
np.expand_dims(np.array(feature_label_tuple[1][1]), axis=0)], axis=1)
})
res4 = est.predict(data_shard)
res4_c = res4.collect()
assert type(res4).__name__ == 'SparkXShards'
for idx in range(len(res4_c)):
assert abs(res4_c[idx]["prediction"][0][0] - res3_c[idx]["prediction"][0]) == 0
assert abs(res4_c[idx]["prediction"][0][1] - res3_c[idx]["prediction"][1]) == 0
est.fit(data_shard, 1, batch_size=4)
res5 = est.predict(data_shard)
res5_c = res5.collect()
res6 = est.predict(df)
res6_c = res6.collect()
for idx in range(len(res5_c)):
assert abs(res5_c[idx]["prediction"][0][0] - res6_c[idx]["prediction"][0]) == 0
assert abs(res5_c[idx]["prediction"][0][1] - res6_c[idx]["prediction"][1]) == 0
def test_nnEstimator_evaluation(self):
df = self.get_estimator_df2()
linear_model = Sequential().add(Linear(2, 2)).add(LogSoftMax())
est = Estimator.from_bigdl(model=linear_model, loss=ClassNLLCriterion(), optimizer=Adam(),
feature_preprocessing=SeqToTensor([2]),
label_preprocessing=SeqToTensor([1]),
metrics=Accuracy())
est.fit(data=df, epochs=10, batch_size=8)
result = est.evaluate(df, batch_size=8)
shift = udf(lambda p: float(p.index(max(p))), DoubleType())
pred = est.predict(df).withColumn("prediction", shift(col('prediction'))).cache()
correct = pred.filter("label=prediction").count()
overall = pred.count()
accuracy = correct * 1.0 / overall
assert accuracy == round(result['Top1Accuracy'], 2)
def test_nnEstimator_multiInput(self):
zx1 = ZLayer.Input(shape=(1,))
zx2 = ZLayer.Input(shape=(1,))
zz = ZLayer.merge([zx1, zx2], mode="concat")
zy = ZLayer.Dense(2)(zz)
zmodel = ZModel([zx1, zx2], zy)
criterion = MSECriterion()
df, _ = self.get_estimator_df()
estimator = Estimator.from_bigdl(model=zmodel, loss=criterion,
feature_preprocessing=[[1], [1]])
estimator.fit(df, epochs=5, batch_size=4)
pred = estimator.predict(df)
pred_data = pred.collect()
assert type(pred).__name__ == 'DataFrame'
def test_nnEstimator_multiInput_cols(self):
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.getOrCreate()
df = spark.createDataFrame(
[(1, 35, 109.0, Vectors.dense([2.0, 5.0, 0.5, 0.5]), 1.0),
(2, 58, 2998.0, Vectors.dense([4.0, 10.0, 0.5, 0.5]), 2.0),
(3, 18, 123.0, Vectors.dense([3.0, 15.0, 0.5, 0.5]), 1.0),
(4, 18, 123.0, Vectors.dense([3.0, 15.0, 0.5, 0.5]), 1.0)],
["user", "age", "income", "history", "label"])
x1 = ZLayer.Input(shape=(1,))
x2 = ZLayer.Input(shape=(2,))
x3 = ZLayer.Input(shape=(2, 2,))
user_embedding = ZLayer.Embedding(5, 10)(x1)
flatten = ZLayer.Flatten()(user_embedding)
dense1 = ZLayer.Dense(2)(x2)
gru = ZLayer.LSTM(4, input_shape=(2, 2))(x3)
merged = ZLayer.merge([flatten, dense1, gru], mode="concat")
zy = ZLayer.Dense(2)(merged)
zmodel = ZModel([x1, x2, x3], zy)
criterion = ClassNLLCriterion()
est = Estimator.from_bigdl(model=zmodel, loss=criterion, optimizer=Adam(learningrate=0.1),
feature_preprocessing=[[1], [2], [2, 2]])
est.fit(df, epochs=1, batch_size=4, feature_cols=["user", "age", "income", "history"])
res = est.predict(df, feature_cols=["user", "age", "income", "history"])
res_c = res.collect()
assert type(res).__name__ == 'DataFrame'
def test_nnEstimator_multiOutput_cols(self):
from pyspark.ml.linalg import Vectors
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.getOrCreate()
df = spark.createDataFrame(
[(1.0, 2.0, 1.0, 2.0),
(2.0, 2.0, 2.0, 1.0),
(3.0, 2.0, 1.0, 2.0),
(4.0, 1.0, 1.0, 2.0)],
["user", "age", "label1", "label2"])
linear_model = Sequential().add(Linear(2, 2))
mse_criterion = MSECriterion()
est = Estimator.from_bigdl(model=linear_model, loss=mse_criterion, optimizer=Adam(),
feature_preprocessing=SeqToTensor([2]),
label_preprocessing=SeqToTensor([2]))
est.fit(df, 1, batch_size=4, feature_cols=["user", "age"], label_cols=["label1", "label2"])
result = est.predict(df, feature_cols=["user", "age"])
result_c = result.collect()
assert type(result).__name__ == 'DataFrame'
def test_nnEstimator_fit_with_train_val_summary(self):
model = Sequential().add(Linear(2, 2))
criterion = MSECriterion()
df, val_df = self.get_estimator_df()
from zoo.orca.learn.metrics import MAE
est = Estimator.from_bigdl(model=model, loss=criterion, optimizer=Adam(), metrics=[MAE()],
feature_preprocessing=SeqToTensor([2]),
label_preprocessing=SeqToTensor([2]))
tmp_dir = tempfile.mkdtemp()
est.set_tensorboard(log_dir=tmp_dir, app_name="estTest")
est.fit(df, epochs=5, batch_size=4, validation_data=val_df, validation_trigger=EveryEpoch(),
checkpoint_trigger=SeveralIteration(1))
res = est.predict(df)
loss_result = est.get_train_summary("Loss")
mae_result = est.get_validation_summary("MAE")
assert type(res).__name__ == 'DataFrame'
assert len(loss_result) == 5
assert len(mae_result) == 4
def test_xshards_spark_estimator(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
def transform(df):
result = {
"x": np.stack([df['user'].to_numpy(), df['item'].to_numpy()], axis=1),
"y": df['label'].to_numpy()
}
return result
file_path = os.path.join(resource_path, "orca/learn/ncf2.csv")
data_shard = read_csv(file_path)
data_shard = data_shard.transform_shard(transform)
model = Sequential()
model.add(Linear(2, 2))
model.add(LogSoftMax())
optim_method = SGD(learningrate=0.01)
with tempfile.TemporaryDirectory() as temp_dir_name:
estimator = Estimator.from_bigdl(model=model, optimizer=optim_method,
loss=ClassNLLCriterion(),
metrics=Accuracy(),
model_dir=temp_dir_name,
feature_preprocessing=SeqToTensor([2]),
label_preprocessing=SeqToTensor([1]))
estimator.set_constant_gradient_clipping(0.1, 1.2)
r1 = estimator.predict(data=data_shard)
r_c = r1.collect()
estimator.set_tensorboard(log_dir=temp_dir_name, app_name="test")
estimator.fit(data=data_shard, epochs=5, batch_size=8, validation_data=data_shard,
checkpoint_trigger=EveryEpoch())
summary = estimator.get_train_summary(tag="Loss")
temp_path = os.path.join(temp_dir_name, "save_model")
estimator.save(temp_path)
with self.assertRaises(Exception) as context:
Estimator.from_bigdl(model=model, optimizer=optim_method,
loss=ClassNLLCriterion(),
metrics=['accuracy'],
model_dir=temp_dir_name,
feature_preprocessing=SeqToTensor([2]),
label_preprocessing=SeqToTensor([1]))
self.assertTrue('Only orca metrics are supported, but get str' in
str(context.exception))
eval_result = estimator.evaluate(data=data_shard,
batch_size=8)
assert isinstance(eval_result, dict)
result = estimator.predict(data=data_shard)
assert type(result).__name__ == 'SparkXShards'
result_c = result.collect()
df = self.get_estimator_df2()
r0 = estimator.predict(df)
r0_c = r0.collect()
assert type(r0).__name__ == 'DataFrame'
for idx in range(len(r0_c)):
assert abs(r0_c[idx]["prediction"][0] - result_c[0]["prediction"][idx][0]) <= 1e-06
assert abs(r0_c[idx]["prediction"][1] - result_c[0]["prediction"][idx][1]) <= 1e-06
estimator.fit(data=df, epochs=6, batch_size=8, validation_data=df,
validation_trigger=EveryEpoch())
summary = estimator.get_train_summary("Loss")
# test load from checkpoint
est2 = Estimator.from_bigdl(model=Sequential(), optimizer=None, loss=None,
metrics=[Accuracy()],
model_dir=None)
est2.load(temp_dir_name, loss=ClassNLLCriterion(), is_checkpoint=True)
r2 = est2.predict(data=data_shard)
r2_c = r2.collect()
assert (result_c[0]["prediction"] == r2_c[0]["prediction"]).all()
# resume training
est2.fit(data=data_shard, epochs=10, batch_size=8, validation_data=data_shard,
checkpoint_trigger=EveryEpoch())
est2.evaluate(data=data_shard, batch_size=8)
# test load from saved model
est3 = Estimator.from_bigdl(model=Sequential(), optimizer=None, loss=None,
model_dir=None)
est3.load(temp_path, optimizer=optim_method, loss=ClassNLLCriterion())
r3 = est3.predict(data=data_shard)
r3_c = r3.collect()
assert (r3_c[0]["prediction"] == r2_c[0]["prediction"]).all()
def test_xshards_spark_estimator_multi_inputs(self):
resource_path = os.path.join(os.path.split(__file__)[0], "../../../resources")
def transform(df):
result = {
"x": [np.expand_dims(df['user'].to_numpy(), axis=1),
np.expand_dims(df['item'].to_numpy(), axis=1)],
"y": df['label'].to_numpy()
}
return result
file_path = os.path.join(resource_path, "orca/learn/ncf2.csv")
data_shard = read_csv(file_path)
data_shard = data_shard.transform_shard(transform)
zx1 = ZLayer.Input(shape=(1,))
zx2 = ZLayer.Input(shape=(1,))
zz = ZLayer.merge([zx1, zx2], mode="concat")
zy = ZLayer.Dense(2)(zz)
model = ZModel([zx1, zx2], zy)
optim_method = SGD(learningrate=0.01)
with tempfile.TemporaryDirectory() as temp_dir_name:
estimator = Estimator.from_bigdl(model=model, optimizer=optim_method,
loss=ClassNLLCriterion(),
metrics=[Accuracy()],
model_dir=temp_dir_name)
estimator.set_constant_gradient_clipping(0.1, 1.2)
r1 = estimator.predict(data=data_shard)
r_c = r1.collect()
estimator.set_tensorboard(log_dir=temp_dir_name, app_name="test")
estimator.fit(data=data_shard, epochs=5, batch_size=8, validation_data=data_shard,
checkpoint_trigger=EveryEpoch())
summary = estimator.get_train_summary(tag="Loss")
temp_path = os.path.join(temp_dir_name, "save_model")
estimator.save(temp_path)
eval_result = estimator.evaluate(data=data_shard,
batch_size=8)
if __name__ == "__main__":
import pytest
pytest.main([__file__])
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/learn/spark/test_estimator_for_bigdl.py
|
Python
|
apache-2.0
| 17,513
|
[
"ORCA"
] |
da2ac2ffcfb3cb445576acb939034d9dca59f47924992ad7372361f78c5be417
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from El.core import *
from ctypes import CFUNCTYPE
# Special matrices
# ****************
# Deterministic
# =============
# Bull's head
# -----------
lib.ElBullsHead_c.argtypes = \
lib.ElBullsHead_z.argtypes = \
lib.ElBullsHeadDist_c.argtypes = \
lib.ElBullsHeadDist_z.argtypes = \
[c_void_p,iType]
def BullsHead(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElBullsHead_c(*args)
elif A.tag == zTag: lib.ElBullsHead_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElBullsHeadDist_c(*args)
elif A.tag == zTag: lib.ElBullsHeadDist_z(*args)
else: DataExcept()
else: TypeExcept()
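# Example (an illustrative sketch, not part of the upstream bindings; it
# assumes Matrix and cTag are importable from El.core, as used throughout
# this module):
#
#   A = Matrix(cTag)
#   BullsHead(A, 100)  # overwrite A with the 100 x 100 "Bull's head" matrix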
# Cauchy
# ------
lib.ElCauchy_s.argtypes = \
lib.ElCauchyDist_s.argtypes = \
[c_void_p,iType,POINTER(sType),iType,POINTER(sType)]
lib.ElCauchy_d.argtypes = \
lib.ElCauchyDist_d.argtypes = \
[c_void_p,iType,POINTER(dType),iType,POINTER(dType)]
lib.ElCauchy_c.argtypes = \
lib.ElCauchyDist_c.argtypes = \
[c_void_p,iType,POINTER(cType),iType,POINTER(cType)]
lib.ElCauchy_z.argtypes = \
lib.ElCauchyDist_z.argtypes = \
[c_void_p,iType,POINTER(zType),iType,POINTER(zType)]
def Cauchy(A,x,y):
xLen = len(x)
yLen = len(y)
xBuf = (TagToType(A.tag)*xLen)(*x)
yBuf = (TagToType(A.tag)*yLen)(*y)
  args = [A.obj,xLen,xBuf,yLen,yBuf]
if type(A) is Matrix:
if A.tag == sTag: lib.ElCauchy_s(*args)
elif A.tag == dTag: lib.ElCauchy_d(*args)
elif A.tag == cTag: lib.ElCauchy_c(*args)
elif A.tag == zTag: lib.ElCauchy_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElCauchyDist_s(*args)
elif A.tag == dTag: lib.ElCauchyDist_d(*args)
elif A.tag == cTag: lib.ElCauchyDist_c(*args)
elif A.tag == zTag: lib.ElCauchyDist_z(*args)
else: DataExcept()
else: TypeExcept()
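# Example (sketch; assumes Matrix and dTag from El.core). The node vectors
# x and y are plain Python sequences; they are marshaled into ctypes buffers
# by the wrapper above:
#
#   A = Matrix(dTag)
#   Cauchy(A, [1., 2., 3.], [4., 5., 6.])  # 3 x 3 Cauchy matrix from x and y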
# Cauchy-like
# -----------
lib.ElCauchyLike_s.argtypes = \
lib.ElCauchyLikeDist_s.argtypes = \
[c_void_p,iType,POINTER(sType),iType,POINTER(sType),
iType,POINTER(sType),iType,POINTER(sType)]
lib.ElCauchyLike_d.argtypes = \
lib.ElCauchyLikeDist_d.argtypes = \
[c_void_p,iType,POINTER(dType),iType,POINTER(dType),
iType,POINTER(dType),iType,POINTER(dType)]
lib.ElCauchyLike_c.argtypes = \
lib.ElCauchyLikeDist_c.argtypes = \
[c_void_p,iType,POINTER(cType),iType,POINTER(cType),
iType,POINTER(cType),iType,POINTER(cType)]
lib.ElCauchyLike_z.argtypes = \
lib.ElCauchyLikeDist_z.argtypes = \
[c_void_p,iType,POINTER(zType),iType,POINTER(zType),
iType,POINTER(zType),iType,POINTER(zType)]
def CauchyLike(A,r,s,x,y):
rLen = len(r)
sLen = len(s)
xLen = len(x)
yLen = len(y)
rBuf = (TagToType(A.tag)*rLen)(*r)
sBuf = (TagToType(A.tag)*sLen)(*s)
xBuf = (TagToType(A.tag)*xLen)(*x)
yBuf = (TagToType(A.tag)*yLen)(*y)
args = [A.obj,rLen,rBuf,sLen,sBuf,xLen,xBuf,yLen,yBuf]
if type(A) is Matrix:
if A.tag == sTag: lib.ElCauchyLike_s(*args)
elif A.tag == dTag: lib.ElCauchyLike_d(*args)
elif A.tag == cTag: lib.ElCauchyLike_c(*args)
elif A.tag == zTag: lib.ElCauchyLike_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElCauchyLikeDist_s(*args)
elif A.tag == dTag: lib.ElCauchyLikeDist_d(*args)
elif A.tag == cTag: lib.ElCauchyLikeDist_c(*args)
elif A.tag == zTag: lib.ElCauchyLikeDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Circulant
# ---------
lib.ElCirculant_i.argtypes = \
lib.ElCirculantDist_i.argtypes = \
[c_void_p,iType,POINTER(iType)]
lib.ElCirculant_s.argtypes = \
lib.ElCirculantDist_s.argtypes = \
[c_void_p,iType,POINTER(sType)]
lib.ElCirculant_d.argtypes = \
lib.ElCirculantDist_d.argtypes = \
[c_void_p,iType,POINTER(dType)]
lib.ElCirculant_c.argtypes = \
lib.ElCirculantDist_c.argtypes = \
[c_void_p,iType,POINTER(cType)]
lib.ElCirculant_z.argtypes = \
lib.ElCirculantDist_z.argtypes = \
[c_void_p,iType,POINTER(zType)]
def Circulant(A,a):
aLen = len(a)
aBuf = (TagToType(A.tag)*aLen)(*a)
args = [A.obj,aLen,aBuf]
if type(A) is Matrix:
if A.tag == iTag: lib.ElCirculant_i(*args)
elif A.tag == sTag: lib.ElCirculant_s(*args)
elif A.tag == dTag: lib.ElCirculant_d(*args)
elif A.tag == cTag: lib.ElCirculant_c(*args)
elif A.tag == zTag: lib.ElCirculant_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElCirculantDist_i(*args)
elif A.tag == sTag: lib.ElCirculantDist_s(*args)
elif A.tag == dTag: lib.ElCirculantDist_d(*args)
elif A.tag == cTag: lib.ElCirculantDist_c(*args)
elif A.tag == zTag: lib.ElCirculantDist_z(*args)
else: DataExcept()
else: TypeExcept()
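# Example (sketch; assumes Matrix and dTag from El.core):
#
#   A = Matrix(dTag)
#   Circulant(A, [1., 2., 3.])  # circulant matrix whose first column is a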
# Demmel
# ------
lib.ElDemmel_s.argtypes = \
lib.ElDemmel_d.argtypes = \
lib.ElDemmel_c.argtypes = \
lib.ElDemmel_z.argtypes = \
lib.ElDemmelDist_s.argtypes = \
lib.ElDemmelDist_d.argtypes = \
lib.ElDemmelDist_c.argtypes = \
lib.ElDemmelDist_z.argtypes = \
[c_void_p,iType]
def Demmel(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElDemmel_s(*args)
elif A.tag == dTag: lib.ElDemmel_d(*args)
elif A.tag == cTag: lib.ElDemmel_c(*args)
elif A.tag == zTag: lib.ElDemmel_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElDemmelDist_s(*args)
elif A.tag == dTag: lib.ElDemmelDist_d(*args)
elif A.tag == cTag: lib.ElDemmelDist_c(*args)
elif A.tag == zTag: lib.ElDemmelDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Diagonal
# --------
lib.ElDiagonal_i.argtypes = \
lib.ElDiagonal_s.argtypes = \
lib.ElDiagonal_d.argtypes = \
lib.ElDiagonal_c.argtypes = \
lib.ElDiagonal_z.argtypes = \
lib.ElDiagonalDist_i.argtypes = \
lib.ElDiagonalDist_s.argtypes = \
lib.ElDiagonalDist_d.argtypes = \
lib.ElDiagonalDist_c.argtypes = \
lib.ElDiagonalDist_z.argtypes = \
lib.ElDiagonalSparse_i.argtypes = \
lib.ElDiagonalSparse_s.argtypes = \
lib.ElDiagonalSparse_d.argtypes = \
lib.ElDiagonalSparse_c.argtypes = \
lib.ElDiagonalSparse_z.argtypes = \
lib.ElDiagonalDistSparse_i.argtypes = \
lib.ElDiagonalDistSparse_s.argtypes = \
lib.ElDiagonalDistSparse_d.argtypes = \
lib.ElDiagonalDistSparse_c.argtypes = \
lib.ElDiagonalDistSparse_z.argtypes = \
[c_void_p,c_void_p]
def Diagonal(A,d):
args = [A.obj,d.obj]
if type(A) is Matrix:
if A.tag == iTag: lib.ElDiagonal_i(*args)
elif A.tag == sTag: lib.ElDiagonal_s(*args)
elif A.tag == dTag: lib.ElDiagonal_d(*args)
elif A.tag == cTag: lib.ElDiagonal_c(*args)
elif A.tag == zTag: lib.ElDiagonal_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElDiagonalDist_i(*args)
elif A.tag == sTag: lib.ElDiagonalDist_s(*args)
elif A.tag == dTag: lib.ElDiagonalDist_d(*args)
elif A.tag == cTag: lib.ElDiagonalDist_c(*args)
elif A.tag == zTag: lib.ElDiagonalDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElDiagonalSparse_i(*args)
elif A.tag == sTag: lib.ElDiagonalSparse_s(*args)
elif A.tag == dTag: lib.ElDiagonalSparse_d(*args)
elif A.tag == cTag: lib.ElDiagonalSparse_c(*args)
elif A.tag == zTag: lib.ElDiagonalSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElDiagonalDistSparse_i(*args)
elif A.tag == sTag: lib.ElDiagonalDistSparse_s(*args)
elif A.tag == dTag: lib.ElDiagonalDistSparse_d(*args)
elif A.tag == cTag: lib.ElDiagonalDistSparse_c(*args)
elif A.tag == zTag: lib.ElDiagonalDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
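# Example (sketch; assumes Matrix and dTag from El.core). Note that d is an
# El vector object rather than a Python list, since only d.obj is passed:
#
#   d = Matrix(dTag)
#   Ones(d, 5, 1)      # Ones is defined later in this module
#   A = Matrix(dTag)
#   Diagonal(A, d)     # A := diag(d)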
# DruinskyToledo
# --------------
lib.ElDruinskyToledo_s.argtypes = \
lib.ElDruinskyToledo_d.argtypes = \
lib.ElDruinskyToledo_c.argtypes = \
lib.ElDruinskyToledo_z.argtypes = \
lib.ElDruinskyToledoDist_s.argtypes = \
lib.ElDruinskyToledoDist_d.argtypes = \
lib.ElDruinskyToledoDist_c.argtypes = \
lib.ElDruinskyToledoDist_z.argtypes = \
[c_void_p,iType]
def DruinskyToledo(A,k):
args = [A.obj,k]
if type(A) is Matrix:
if A.tag == sTag: lib.ElDruinskyToledo_s(*args)
elif A.tag == dTag: lib.ElDruinskyToledo_d(*args)
elif A.tag == cTag: lib.ElDruinskyToledo_c(*args)
elif A.tag == zTag: lib.ElDruinskyToledo_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElDruinskyToledoDist_s(*args)
elif A.tag == dTag: lib.ElDruinskyToledoDist_d(*args)
elif A.tag == cTag: lib.ElDruinskyToledoDist_c(*args)
elif A.tag == zTag: lib.ElDruinskyToledoDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Dynamic regularization counter-example
# --------------------------------------
lib.ElDynamicRegCounter_s.argtypes = \
lib.ElDynamicRegCounter_d.argtypes = \
lib.ElDynamicRegCounter_c.argtypes = \
lib.ElDynamicRegCounter_z.argtypes = \
lib.ElDynamicRegCounterDist_s.argtypes = \
lib.ElDynamicRegCounterDist_d.argtypes = \
lib.ElDynamicRegCounterDist_c.argtypes = \
lib.ElDynamicRegCounterDist_z.argtypes = \
lib.ElDynamicRegCounterSparse_s.argtypes = \
lib.ElDynamicRegCounterSparse_d.argtypes = \
lib.ElDynamicRegCounterSparse_c.argtypes = \
lib.ElDynamicRegCounterSparse_z.argtypes = \
lib.ElDynamicRegCounterDistSparse_s.argtypes = \
lib.ElDynamicRegCounterDistSparse_d.argtypes = \
lib.ElDynamicRegCounterDistSparse_c.argtypes = \
lib.ElDynamicRegCounterDistSparse_z.argtypes = \
[c_void_p,iType]
def DynamicRegCounter(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElDynamicRegCounter_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounter_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounter_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounter_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElDynamicRegCounterDist_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounterDist_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounterDist_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounterDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == sTag: lib.ElDynamicRegCounterSparse_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounterSparse_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounterSparse_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounterSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == sTag: lib.ElDynamicRegCounterDistSparse_s(*args)
elif A.tag == dTag: lib.ElDynamicRegCounterDistSparse_d(*args)
elif A.tag == cTag: lib.ElDynamicRegCounterDistSparse_c(*args)
elif A.tag == zTag: lib.ElDynamicRegCounterDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# Egorov
# ------
lib.ElEgorov_c.argtypes = \
lib.ElEgorovDist_c.argtypes = \
[c_void_p,CFUNCTYPE(sType,iType,iType),iType]
lib.ElEgorov_z.argtypes = \
lib.ElEgorovDist_z.argtypes = \
[c_void_p,CFUNCTYPE(dType,iType,iType),iType]
def Egorov(A,phase,n):
cPhase = CFUNCTYPE(TagToType(Base(A.tag)),iType,iType)(phase)
args = [A.obj,cPhase,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElEgorov_c(*args)
elif A.tag == zTag: lib.ElEgorov_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElEgorovDist_c(*args)
elif A.tag == zTag: lib.ElEgorovDist_z(*args)
else: DataExcept()
else: TypeExcept()
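# Example (sketch; assumes Matrix and cTag from El.core). The phase argument
# is an ordinary Python function of two integer indices returning a real
# number; the CFUNCTYPE wrapping above is handled internally:
#
#   def phase(i, j): return 0.5 * i * j
#   A = Matrix(cTag)
#   Egorov(A, phase, 50)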
# Ehrenfest
# ---------
lib.ElEhrenfest_s.argtypes = \
lib.ElEhrenfest_d.argtypes = \
lib.ElEhrenfest_c.argtypes = \
lib.ElEhrenfest_z.argtypes = \
lib.ElEhrenfestDist_s.argtypes = \
lib.ElEhrenfestDist_d.argtypes = \
lib.ElEhrenfestDist_c.argtypes = \
lib.ElEhrenfestDist_z.argtypes = \
[c_void_p,iType]
def Ehrenfest(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElEhrenfest_s(*args)
elif P.tag == dTag: lib.ElEhrenfest_d(*args)
elif P.tag == cTag: lib.ElEhrenfest_c(*args)
elif P.tag == zTag: lib.ElEhrenfest_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElEhrenfestDist_s(*args)
elif P.tag == dTag: lib.ElEhrenfestDist_d(*args)
elif P.tag == cTag: lib.ElEhrenfestDist_c(*args)
elif P.tag == zTag: lib.ElEhrenfestDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElEhrenfestStationary_s.argtypes = \
lib.ElEhrenfestStationary_d.argtypes = \
lib.ElEhrenfestStationary_c.argtypes = \
lib.ElEhrenfestStationary_z.argtypes = \
lib.ElEhrenfestStationaryDist_s.argtypes = \
lib.ElEhrenfestStationaryDist_d.argtypes = \
lib.ElEhrenfestStationaryDist_c.argtypes = \
lib.ElEhrenfestStationaryDist_z.argtypes = \
[c_void_p,iType]
def EhrenfestStationary(PInf,n):
args = [PInf.obj,n]
if type(PInf) is Matrix:
if PInf.tag == sTag: lib.ElEhrenfestStationary_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestStationary_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestStationary_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestStationary_z(*args)
else: DataExcept()
elif type(PInf) is DistMatrix:
if PInf.tag == sTag: lib.ElEhrenfestStationaryDist_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestStationaryDist_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestStationaryDist_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestStationaryDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElEhrenfestDecay_s.argtypes = \
lib.ElEhrenfestDecay_d.argtypes = \
lib.ElEhrenfestDecay_c.argtypes = \
lib.ElEhrenfestDecay_z.argtypes = \
lib.ElEhrenfestDecayDist_s.argtypes = \
lib.ElEhrenfestDecayDist_d.argtypes = \
lib.ElEhrenfestDecayDist_c.argtypes = \
lib.ElEhrenfestDecayDist_z.argtypes = \
[c_void_p,iType]
def EhrenfestDecay(PInf,n):
args = [PInf.obj,n]
if type(PInf) is Matrix:
if PInf.tag == sTag: lib.ElEhrenfestDecay_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestDecay_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestDecay_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestDecay_z(*args)
else: DataExcept()
elif type(PInf) is DistMatrix:
if PInf.tag == sTag: lib.ElEhrenfestDecayDist_s(*args)
elif PInf.tag == dTag: lib.ElEhrenfestDecayDist_d(*args)
elif PInf.tag == cTag: lib.ElEhrenfestDecayDist_c(*args)
elif PInf.tag == zTag: lib.ElEhrenfestDecayDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Extended Kahan
# --------------
lib.ElExtendedKahan_s.argtypes = \
lib.ElExtendedKahan_c.argtypes = \
lib.ElExtendedKahanDist_s.argtypes = \
lib.ElExtendedKahanDist_c.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElExtendedKahan_d.argtypes = \
lib.ElExtendedKahan_z.argtypes = \
lib.ElExtendedKahanDist_d.argtypes = \
lib.ElExtendedKahanDist_z.argtypes = \
[c_void_p,iType,dType,dType]
def ExtendedKahan(A,k,phi,mu):
args = [A.obj,k,phi,mu]
if type(A) is Matrix:
if A.tag == sTag: lib.ElExtendedKahan_s(*args)
elif A.tag == dTag: lib.ElExtendedKahan_d(*args)
elif A.tag == cTag: lib.ElExtendedKahan_c(*args)
elif A.tag == zTag: lib.ElExtendedKahan_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElExtendedKahanDist_s(*args)
elif A.tag == dTag: lib.ElExtendedKahanDist_d(*args)
elif A.tag == cTag: lib.ElExtendedKahanDist_c(*args)
elif A.tag == zTag: lib.ElExtendedKahanDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fiedler
# -------
lib.ElFiedler_s.argtypes = \
lib.ElFiedlerDist_s.argtypes = \
[c_void_p,iType,POINTER(sType)]
lib.ElFiedler_d.argtypes = \
lib.ElFiedlerDist_d.argtypes = \
[c_void_p,iType,POINTER(dType)]
lib.ElFiedler_c.argtypes = \
lib.ElFiedlerDist_c.argtypes = \
[c_void_p,iType,POINTER(cType)]
lib.ElFiedler_z.argtypes = \
lib.ElFiedlerDist_z.argtypes = \
[c_void_p,iType,POINTER(zType)]
def Fiedler(A,c):
cLen = len(c)
cBuf = (TagToType(A.tag)*cLen)(*c)
args = [A.obj,cLen,cBuf]
if type(A) is Matrix:
if A.tag == sTag: lib.ElFiedler_s(*args)
elif A.tag == dTag: lib.ElFiedler_d(*args)
elif A.tag == cTag: lib.ElFiedler_c(*args)
elif A.tag == zTag: lib.ElFiedler_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElFiedlerDist_s(*args)
elif A.tag == dTag: lib.ElFiedlerDist_d(*args)
elif A.tag == cTag: lib.ElFiedlerDist_c(*args)
elif A.tag == zTag: lib.ElFiedlerDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Forsythe
# --------
lib.ElForsythe_i.argtypes = \
lib.ElForsytheDist_i.argtypes = \
[c_void_p,iType,iType,iType]
lib.ElForsythe_s.argtypes = \
lib.ElForsytheDist_s.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElForsythe_d.argtypes = \
lib.ElForsytheDist_d.argtypes = \
[c_void_p,iType,dType,dType]
lib.ElForsythe_c.argtypes = \
lib.ElForsytheDist_c.argtypes = \
[c_void_p,iType,cType,cType]
lib.ElForsythe_z.argtypes = \
lib.ElForsytheDist_z.argtypes = \
[c_void_p,iType,zType,zType]
def Forsythe(J,n,alpha,lamb):
  args = [J.obj,n,alpha,lamb]
if type(J) is Matrix:
if J.tag == iTag: lib.ElForsythe_i(*args)
elif J.tag == sTag: lib.ElForsythe_s(*args)
elif J.tag == dTag: lib.ElForsythe_d(*args)
elif J.tag == cTag: lib.ElForsythe_c(*args)
elif J.tag == zTag: lib.ElForsythe_z(*args)
else: DataExcept()
elif type(J) is DistMatrix:
if J.tag == iTag: lib.ElForsytheDist_i(*args)
elif J.tag == sTag: lib.ElForsytheDist_s(*args)
elif J.tag == dTag: lib.ElForsytheDist_d(*args)
elif J.tag == cTag: lib.ElForsytheDist_c(*args)
elif J.tag == zTag: lib.ElForsytheDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fox-Li
# ------
lib.ElFoxLi_c.argtypes = \
lib.ElFoxLiDist_c.argtypes = \
[c_void_p,iType,sType]
lib.ElFoxLi_z.argtypes = \
lib.ElFoxLiDist_z.argtypes = \
[c_void_p,iType,dType]
def FoxLi(A,n,omega=48.):
args = [A.obj,n,omega]
if type(A) is Matrix:
if A.tag == cTag: lib.ElFoxLi_c(*args)
elif A.tag == zTag: lib.ElFoxLi_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElFoxLiDist_c(*args)
elif A.tag == zTag: lib.ElFoxLiDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fourier
# -------
lib.ElFourier_c.argtypes = \
lib.ElFourier_z.argtypes = \
lib.ElFourierDist_c.argtypes = \
lib.ElFourierDist_z.argtypes = \
[c_void_p,iType]
def Fourier(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElFourier_c(*args)
elif A.tag == zTag: lib.ElFourier_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElFourierDist_c(*args)
elif A.tag == zTag: lib.ElFourierDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Fourier-Identity
# ----------------
lib.ElFourierIdentity_c.argtypes = \
lib.ElFourierIdentity_z.argtypes = \
lib.ElFourierIdentityDist_c.argtypes = \
lib.ElFourierIdentityDist_z.argtypes = \
[c_void_p,iType]
def FourierIdentity(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElFourierIdentity_c(*args)
elif A.tag == zTag: lib.ElFourierIdentity_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElFourierIdentityDist_c(*args)
elif A.tag == zTag: lib.ElFourierIdentityDist_z(*args)
else: DataExcept()
else: TypeExcept()
# GCD matrix
# ----------
lib.ElGCDMatrix_i.argtypes = \
lib.ElGCDMatrix_s.argtypes = \
lib.ElGCDMatrix_d.argtypes = \
lib.ElGCDMatrix_c.argtypes = \
lib.ElGCDMatrix_z.argtypes = \
lib.ElGCDMatrixDist_i.argtypes = \
lib.ElGCDMatrixDist_s.argtypes = \
lib.ElGCDMatrixDist_d.argtypes = \
lib.ElGCDMatrixDist_c.argtypes = \
lib.ElGCDMatrixDist_z.argtypes = \
[c_void_p,iType,iType]
def GCDMatrix(G,m,n):
args = [G.obj,m,n]
if type(G) is Matrix:
if G.tag == iTag: lib.ElGCDMatrix_i(*args)
elif G.tag == sTag: lib.ElGCDMatrix_s(*args)
elif G.tag == dTag: lib.ElGCDMatrix_d(*args)
elif G.tag == cTag: lib.ElGCDMatrix_c(*args)
elif G.tag == zTag: lib.ElGCDMatrix_z(*args)
else: DataExcept()
elif type(G) is DistMatrix:
if G.tag == iTag: lib.ElGCDMatrixDist_i(*args)
elif G.tag == sTag: lib.ElGCDMatrixDist_s(*args)
elif G.tag == dTag: lib.ElGCDMatrixDist_d(*args)
elif G.tag == cTag: lib.ElGCDMatrixDist_c(*args)
elif G.tag == zTag: lib.ElGCDMatrixDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Gear matrix
# -----------
lib.ElGear_i.argtypes = \
lib.ElGear_s.argtypes = \
lib.ElGear_d.argtypes = \
lib.ElGear_c.argtypes = \
lib.ElGear_z.argtypes = \
lib.ElGearDist_i.argtypes = \
lib.ElGearDist_s.argtypes = \
lib.ElGearDist_d.argtypes = \
lib.ElGearDist_c.argtypes = \
lib.ElGearDist_z.argtypes = \
[c_void_p,iType,iType,iType]
def Gear(G,n,s,t):
args = [G.obj,n,s,t]
if type(G) is Matrix:
if G.tag == iTag: lib.ElGear_i(*args)
elif G.tag == sTag: lib.ElGear_s(*args)
elif G.tag == dTag: lib.ElGear_d(*args)
elif G.tag == cTag: lib.ElGear_c(*args)
elif G.tag == zTag: lib.ElGear_z(*args)
else: DataExcept()
elif type(G) is DistMatrix:
if G.tag == iTag: lib.ElGearDist_i(*args)
elif G.tag == sTag: lib.ElGearDist_s(*args)
elif G.tag == dTag: lib.ElGearDist_d(*args)
elif G.tag == cTag: lib.ElGearDist_c(*args)
elif G.tag == zTag: lib.ElGearDist_z(*args)
else: DataExcept()
else: TypeExcept()
# GEPP Growth
# -----------
lib.ElGEPPGrowth_s.argtypes = \
lib.ElGEPPGrowth_d.argtypes = \
lib.ElGEPPGrowth_c.argtypes = \
lib.ElGEPPGrowth_z.argtypes = \
lib.ElGEPPGrowthDist_s.argtypes = \
lib.ElGEPPGrowthDist_d.argtypes = \
lib.ElGEPPGrowthDist_c.argtypes = \
lib.ElGEPPGrowthDist_z.argtypes = \
[c_void_p,iType]
def GEPPGrowth(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElGEPPGrowth_s(*args)
elif A.tag == dTag: lib.ElGEPPGrowth_d(*args)
elif A.tag == cTag: lib.ElGEPPGrowth_c(*args)
elif A.tag == zTag: lib.ElGEPPGrowth_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElGEPPGrowthDist_s(*args)
elif A.tag == dTag: lib.ElGEPPGrowthDist_d(*args)
elif A.tag == cTag: lib.ElGEPPGrowthDist_c(*args)
elif A.tag == zTag: lib.ElGEPPGrowthDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Golub/Klema/Stewart
# -------------------
lib.ElGKS_s.argtypes = \
lib.ElGKS_d.argtypes = \
lib.ElGKS_c.argtypes = \
lib.ElGKS_z.argtypes = \
lib.ElGKSDist_s.argtypes = \
lib.ElGKSDist_d.argtypes = \
lib.ElGKSDist_c.argtypes = \
lib.ElGKSDist_z.argtypes = \
[c_void_p,iType]
def GKS(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElGKS_s(*args)
elif A.tag == dTag: lib.ElGKS_d(*args)
elif A.tag == cTag: lib.ElGKS_c(*args)
elif A.tag == zTag: lib.ElGKS_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElGKSDist_s(*args)
elif A.tag == dTag: lib.ElGKSDist_d(*args)
elif A.tag == cTag: lib.ElGKSDist_c(*args)
elif A.tag == zTag: lib.ElGKSDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Grcar
# -----
lib.ElGrcar_i.argtypes = \
lib.ElGrcar_s.argtypes = \
lib.ElGrcar_d.argtypes = \
lib.ElGrcar_c.argtypes = \
lib.ElGrcar_z.argtypes = \
lib.ElGrcarDist_i.argtypes = \
lib.ElGrcarDist_s.argtypes = \
lib.ElGrcarDist_d.argtypes = \
lib.ElGrcarDist_c.argtypes = \
lib.ElGrcarDist_z.argtypes = \
[c_void_p,iType,iType]
def Grcar(A,n,k=3):
args = [A.obj,n,k]
if type(A) is Matrix:
if A.tag == iTag: lib.ElGrcar_i(*args)
elif A.tag == sTag: lib.ElGrcar_s(*args)
elif A.tag == dTag: lib.ElGrcar_d(*args)
elif A.tag == cTag: lib.ElGrcar_c(*args)
elif A.tag == zTag: lib.ElGrcar_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElGrcarDist_i(*args)
elif A.tag == sTag: lib.ElGrcarDist_s(*args)
elif A.tag == dTag: lib.ElGrcarDist_d(*args)
elif A.tag == cTag: lib.ElGrcarDist_c(*args)
elif A.tag == zTag: lib.ElGrcarDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Haar
# ----
lib.ElHaar_s.argtypes = \
lib.ElHaar_d.argtypes = \
lib.ElHaar_c.argtypes = \
lib.ElHaar_z.argtypes = \
lib.ElHaarDist_s.argtypes = \
lib.ElHaarDist_d.argtypes = \
lib.ElHaarDist_c.argtypes = \
lib.ElHaarDist_z.argtypes = \
[c_void_p,iType]
def Haar(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHaar_s(*args)
elif A.tag == dTag: lib.ElHaar_d(*args)
elif A.tag == cTag: lib.ElHaar_c(*args)
elif A.tag == zTag: lib.ElHaar_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHaarDist_s(*args)
elif A.tag == dTag: lib.ElHaarDist_d(*args)
elif A.tag == cTag: lib.ElHaarDist_c(*args)
elif A.tag == zTag: lib.ElHaarDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElImplicitHaar_s.argtypes = \
lib.ElImplicitHaar_d.argtypes = \
lib.ElImplicitHaar_c.argtypes = \
lib.ElImplicitHaar_z.argtypes = \
lib.ElImplicitHaarDist_s.argtypes = \
lib.ElImplicitHaarDist_d.argtypes = \
lib.ElImplicitHaarDist_c.argtypes = \
lib.ElImplicitHaarDist_z.argtypes = \
[c_void_p,c_void_p,c_void_p,iType]
def ImplicitHaar(A,n):
if type(A) is Matrix:
t = Matrix(A.tag)
d = Matrix(Base(A.tag))
args = [A.obj,t.obj,d.obj,n]
if A.tag == sTag: lib.ElImplicitHaar_s(*args)
elif A.tag == dTag: lib.ElImplicitHaar_d(*args)
elif A.tag == cTag: lib.ElImplicitHaar_c(*args)
elif A.tag == zTag: lib.ElImplicitHaar_z(*args)
else: DataExcept()
return t, d
elif type(A) is DistMatrix:
t = DistMatrix(A.tag,MC,STAR,A.Grid())
d = DistMatrix(Base(A.tag),MC,STAR,A.Grid())
args = [A.obj,t.obj,d.obj,n]
if A.tag == sTag: lib.ElImplicitHaarDist_s(*args)
elif A.tag == dTag: lib.ElImplicitHaarDist_d(*args)
elif A.tag == cTag: lib.ElImplicitHaarDist_c(*args)
elif A.tag == zTag: lib.ElImplicitHaarDist_z(*args)
else: DataExcept()
return t, d
else: TypeExcept()
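# Example (sketch; assumes Matrix and zTag from El.core). The auxiliary
# vectors (t, d) are allocated internally and returned alongside the
# overwritten A:
#
#   A = Matrix(zTag)
#   t, d = ImplicitHaar(A, 64)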
# Hankel
# ------
lib.ElHankel_i.argtypes = \
lib.ElHankelDist_i.argtypes = \
[c_void_p,iType,iType,iType,POINTER(iType)]
lib.ElHankel_s.argtypes = \
lib.ElHankelDist_s.argtypes = \
[c_void_p,iType,iType,iType,POINTER(sType)]
lib.ElHankel_d.argtypes = \
lib.ElHankelDist_d.argtypes = \
[c_void_p,iType,iType,iType,POINTER(dType)]
lib.ElHankel_c.argtypes = \
lib.ElHankelDist_c.argtypes = \
[c_void_p,iType,iType,iType,POINTER(cType)]
lib.ElHankel_z.argtypes = \
lib.ElHankelDist_z.argtypes = \
[c_void_p,iType,iType,iType,POINTER(zType)]
def Hankel(A,m,n,a):
aLen = len(a)
aBuf = (TagToType(A.tag)*aLen)(*a)
args = [A.obj,m,n,aLen,aBuf]
if type(A) is Matrix:
if A.tag == iTag: lib.ElHankel_i(*args)
elif A.tag == sTag: lib.ElHankel_s(*args)
elif A.tag == dTag: lib.ElHankel_d(*args)
elif A.tag == cTag: lib.ElHankel_c(*args)
elif A.tag == zTag: lib.ElHankel_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElHankelDist_i(*args)
elif A.tag == sTag: lib.ElHankelDist_s(*args)
elif A.tag == dTag: lib.ElHankelDist_d(*args)
elif A.tag == cTag: lib.ElHankelDist_c(*args)
elif A.tag == zTag: lib.ElHankelDist_z(*args)
else: DataExcept()
else: TypeExcept()
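# Example (sketch; assumes Matrix and dTag from El.core). An m x n Hankel
# matrix is determined by its m+n-1 anti-diagonal values:
#
#   A = Matrix(dTag)
#   Hankel(A, 3, 3, [1., 2., 3., 4., 5.])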
# Hanowa
# ------
lib.ElHanowa_i.argtypes = \
lib.ElHanowaDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElHanowa_s.argtypes = \
lib.ElHanowaDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElHanowa_d.argtypes = \
lib.ElHanowaDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElHanowa_c.argtypes = \
lib.ElHanowaDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElHanowa_z.argtypes = \
lib.ElHanowaDist_z.argtypes = \
[c_void_p,iType,zType]
def Hanowa(A,n,mu):
args = [A.obj,n,mu]
if type(A) is Matrix:
if A.tag == iTag: lib.ElHanowa_i(*args)
elif A.tag == sTag: lib.ElHanowa_s(*args)
elif A.tag == dTag: lib.ElHanowa_d(*args)
elif A.tag == cTag: lib.ElHanowa_c(*args)
elif A.tag == zTag: lib.ElHanowa_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElHanowaDist_i(*args)
elif A.tag == sTag: lib.ElHanowaDist_s(*args)
elif A.tag == dTag: lib.ElHanowaDist_d(*args)
elif A.tag == cTag: lib.ElHanowaDist_c(*args)
elif A.tag == zTag: lib.ElHanowaDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Hatano-Nelson
# -------------
lib.ElHatanoNelson_s.argtypes = \
lib.ElHatanoNelsonDist_s.argtypes = \
[c_void_p,iType,sType,sType,sType,bType]
lib.ElHatanoNelson_d.argtypes = \
lib.ElHatanoNelsonDist_d.argtypes = \
[c_void_p,iType,dType,dType,dType,bType]
lib.ElHatanoNelson_c.argtypes = \
lib.ElHatanoNelsonDist_c.argtypes = \
[c_void_p,iType,cType,sType,cType,bType]
lib.ElHatanoNelson_z.argtypes = \
lib.ElHatanoNelsonDist_z.argtypes = \
[c_void_p,iType,zType,dType,zType,bType]
def HatanoNelson(A,n,center,radius,g,periodic=True):
args = [A.obj,n,center,radius,g,periodic]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHatanoNelson_s(*args)
elif A.tag == dTag: lib.ElHatanoNelson_d(*args)
elif A.tag == cTag: lib.ElHatanoNelson_c(*args)
elif A.tag == zTag: lib.ElHatanoNelson_z(*args)
else: DataExcept()
  elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHatanoNelsonDist_s(*args)
elif A.tag == dTag: lib.ElHatanoNelsonDist_d(*args)
elif A.tag == cTag: lib.ElHatanoNelsonDist_c(*args)
elif A.tag == zTag: lib.ElHatanoNelsonDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Helmholtz
# ---------
lib.ElHelmholtz1D_s.argtypes = \
lib.ElHelmholtz1DDist_s.argtypes = \
lib.ElHelmholtz1DSparse_s.argtypes = \
lib.ElHelmholtz1DDistSparse_s.argtypes = \
[c_void_p,iType,sType]
lib.ElHelmholtz1D_d.argtypes = \
lib.ElHelmholtz1DDist_d.argtypes = \
lib.ElHelmholtz1DSparse_d.argtypes = \
lib.ElHelmholtz1DDistSparse_d.argtypes = \
[c_void_p,iType,dType]
lib.ElHelmholtz1D_c.argtypes = \
lib.ElHelmholtz1DDist_c.argtypes = \
lib.ElHelmholtz1DSparse_c.argtypes = \
lib.ElHelmholtz1DDistSparse_c.argtypes = \
[c_void_p,iType,cType]
lib.ElHelmholtz1D_z.argtypes = \
lib.ElHelmholtz1DDist_z.argtypes = \
lib.ElHelmholtz1DSparse_z.argtypes = \
lib.ElHelmholtz1DDistSparse_z.argtypes = \
[c_void_p,iType,zType]
def Helmholtz1D(H,nx,shift):
args = [H.obj,nx,shift]
  if type(H) is Matrix:
    if H.tag == sTag: lib.ElHelmholtz1D_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1D_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == sTag: lib.ElHelmholtz1DDist_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1DDist_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz1DSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1DSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz1DDistSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz1DDistSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz1DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz1DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
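# Example (sketch; assumes SparseMatrix and dTag from El.core accept this
# usage, mirroring the dense Matrix case):
#
#   H = SparseMatrix(dTag)
#   Helmholtz1D(H, 100, 2.0)  # 1D discrete Helmholtz operator, shift 2.0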
lib.ElHelmholtz2D_s.argtypes = \
lib.ElHelmholtz2DDist_s.argtypes = \
lib.ElHelmholtz2DSparse_s.argtypes = \
lib.ElHelmholtz2DDistSparse_s.argtypes = \
[c_void_p,iType,iType,sType]
lib.ElHelmholtz2D_d.argtypes = \
lib.ElHelmholtz2DDist_d.argtypes = \
lib.ElHelmholtz2DSparse_d.argtypes = \
lib.ElHelmholtz2DDistSparse_d.argtypes = \
[c_void_p,iType,iType,dType]
lib.ElHelmholtz2D_c.argtypes = \
lib.ElHelmholtz2DDist_c.argtypes = \
lib.ElHelmholtz2DSparse_c.argtypes = \
lib.ElHelmholtz2DDistSparse_c.argtypes = \
[c_void_p,iType,iType,cType]
lib.ElHelmholtz2D_z.argtypes = \
lib.ElHelmholtz2DDist_z.argtypes = \
lib.ElHelmholtz2DSparse_z.argtypes = \
lib.ElHelmholtz2DDistSparse_z.argtypes = \
[c_void_p,iType,iType,zType]
def Helmholtz2D(H,nx,ny,shift):
args = [H.obj,nx,ny,shift]
  if type(H) is Matrix:
    if H.tag == sTag: lib.ElHelmholtz2D_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2D_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == sTag: lib.ElHelmholtz2DDist_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2DDist_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz2DSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2DSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz2DDistSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz2DDistSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz2DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz2DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
lib.ElHelmholtz3D_s.argtypes = \
lib.ElHelmholtz3DDist_s.argtypes = \
lib.ElHelmholtz3DSparse_s.argtypes = \
lib.ElHelmholtz3DDistSparse_s.argtypes = \
  [c_void_p,iType,iType,iType,sType]
lib.ElHelmholtz3D_d.argtypes = \
lib.ElHelmholtz3DDist_d.argtypes = \
lib.ElHelmholtz3DSparse_d.argtypes = \
lib.ElHelmholtz3DDistSparse_d.argtypes = \
  [c_void_p,iType,iType,iType,dType]
lib.ElHelmholtz3D_c.argtypes = \
lib.ElHelmholtz3DDist_c.argtypes = \
lib.ElHelmholtz3DSparse_c.argtypes = \
lib.ElHelmholtz3DDistSparse_c.argtypes = \
  [c_void_p,iType,iType,iType,cType]
lib.ElHelmholtz3D_z.argtypes = \
lib.ElHelmholtz3DDist_z.argtypes = \
lib.ElHelmholtz3DSparse_z.argtypes = \
lib.ElHelmholtz3DDistSparse_z.argtypes = \
  [c_void_p,iType,iType,iType,zType]
def Helmholtz3D(H,nx,ny,nz,shift):
args = [H.obj,nx,ny,nz,shift]
  if type(H) is Matrix:
    if H.tag == sTag: lib.ElHelmholtz3D_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3D_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == sTag: lib.ElHelmholtz3DDist_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3DDist_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz3DSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3DSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == sTag: lib.ElHelmholtz3DDistSparse_s(*args)
    elif H.tag == dTag: lib.ElHelmholtz3DDistSparse_d(*args)
    elif H.tag == cTag: lib.ElHelmholtz3DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtz3DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
# Helmholtz with PML
# ------------------
lib.ElHelmholtzPML1D_c.argtypes = \
lib.ElHelmholtzPML1DDist_c.argtypes = \
lib.ElHelmholtzPML1DSparse_c.argtypes = \
lib.ElHelmholtzPML1DDistSparse_c.argtypes = \
[c_void_p,iType,cType,iType,sType,sType]
lib.ElHelmholtzPML1D_z.argtypes = \
lib.ElHelmholtzPML1DDist_z.argtypes = \
lib.ElHelmholtzPML1DSparse_z.argtypes = \
lib.ElHelmholtzPML1DDistSparse_z.argtypes = \
[c_void_p,iType,zType,iType,dType,dType]
def HelmholtzPML1D(H,nx,omega,numPml,sigma,pmlExp):
args = [H.obj,nx,omega,numPml,sigma,pmlExp]
  if type(H) is Matrix:
    if H.tag == cTag: lib.ElHelmholtzPML1D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML1DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML1DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML1DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML1DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
lib.ElHelmholtzPML2D_c.argtypes = \
lib.ElHelmholtzPML2DDist_c.argtypes = \
lib.ElHelmholtzPML2DSparse_c.argtypes = \
lib.ElHelmholtzPML2DDistSparse_c.argtypes = \
[c_void_p,iType,iType,cType,iType,sType,sType]
lib.ElHelmholtzPML2D_z.argtypes = \
lib.ElHelmholtzPML2DDist_z.argtypes = \
lib.ElHelmholtzPML2DSparse_z.argtypes = \
lib.ElHelmholtzPML2DDistSparse_z.argtypes = \
[c_void_p,iType,iType,zType,iType,dType,dType]
def HelmholtzPML2D(H,nx,ny,omega,numPml,sigma,pmlExp):
args = [H.obj,nx,ny,omega,numPml,sigma,pmlExp]
  if type(H) is Matrix:
    if H.tag == cTag: lib.ElHelmholtzPML2D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML2DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML2DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML2DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML2DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
lib.ElHelmholtzPML3D_c.argtypes = \
lib.ElHelmholtzPML3DDist_c.argtypes = \
lib.ElHelmholtzPML3DSparse_c.argtypes = \
lib.ElHelmholtzPML3DDistSparse_c.argtypes = \
[c_void_p,iType,iType,iType,cType,iType,sType,sType]
lib.ElHelmholtzPML3D_z.argtypes = \
lib.ElHelmholtzPML3DDist_z.argtypes = \
lib.ElHelmholtzPML3DSparse_z.argtypes = \
lib.ElHelmholtzPML3DDistSparse_z.argtypes = \
[c_void_p,iType,iType,iType,zType,iType,dType,dType]
def HelmholtzPML3D(H,nx,ny,nz,omega,numPml,sigma,pmlExp):
args = [H.obj,nx,ny,nz,omega,numPml,sigma,pmlExp]
  if type(H) is Matrix:
    if H.tag == cTag: lib.ElHelmholtzPML3D_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3D_z(*args)
    else: DataExcept()
  elif type(H) is DistMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML3DDist_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3DDist_z(*args)
    else: DataExcept()
  elif type(H) is SparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML3DSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3DSparse_z(*args)
    else: DataExcept()
  elif type(H) is DistSparseMatrix:
    if H.tag == cTag: lib.ElHelmholtzPML3DDistSparse_c(*args)
    elif H.tag == zTag: lib.ElHelmholtzPML3DDistSparse_z(*args)
    else: DataExcept()
else: TypeExcept()
# Hermitian from EVD
# ------------------
lib.ElHermitianFromEVD_s.argtypes = \
lib.ElHermitianFromEVD_d.argtypes = \
lib.ElHermitianFromEVD_c.argtypes = \
lib.ElHermitianFromEVD_z.argtypes = \
lib.ElHermitianFromEVDDist_s.argtypes = \
lib.ElHermitianFromEVDDist_d.argtypes = \
lib.ElHermitianFromEVDDist_c.argtypes = \
lib.ElHermitianFromEVDDist_z.argtypes = \
[c_uint,c_void_p,c_void_p,c_void_p]
def HermitianFromEVD(uplo,A,w,Z):
if type(A) is not type(w) or type(w) is not type(Z):
raise Exception('Types of {A,w,Z} must match')
if A.tag != Z.tag:
raise Exception('Datatypes of A and Z must match')
if w.tag != Base(Z.tag):
raise Exception('w must be of the base datatype of Z')
args = [uplo,A.obj,w.obj,Z.obj]
if type(Z) is Matrix:
if Z.tag == sTag: lib.ElHermitianFromEVD_s(*args)
elif Z.tag == dTag: lib.ElHermitianFromEVD_d(*args)
elif Z.tag == cTag: lib.ElHermitianFromEVD_c(*args)
elif Z.tag == zTag: lib.ElHermitianFromEVD_z(*args)
else: DataExcept()
elif type(Z) is DistMatrix:
if Z.tag == sTag: lib.ElHermitianFromEVDDist_s(*args)
elif Z.tag == dTag: lib.ElHermitianFromEVDDist_d(*args)
elif Z.tag == cTag: lib.ElHermitianFromEVDDist_c(*args)
elif Z.tag == zTag: lib.ElHermitianFromEVDDist_z(*args)
else: DataExcept()
else: TypeExcept()
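# Example (sketch; assumes a LOWER uplo constant from El.core and that w and
# Z already hold an eigendecomposition satisfying the type checks above):
#
#   A = Matrix(zTag)
#   HermitianFromEVD(LOWER, A, w, Z)  # rebuild A from Z diag(w) Z^H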
# Hermitian uniform spectrum
# --------------------------
lib.ElHermitianUniformSpectrum_s.argtypes = \
lib.ElHermitianUniformSpectrum_c.argtypes = \
lib.ElHermitianUniformSpectrumDist_s.argtypes = \
lib.ElHermitianUniformSpectrumDist_c.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElHermitianUniformSpectrum_d.argtypes = \
lib.ElHermitianUniformSpectrum_z.argtypes = \
lib.ElHermitianUniformSpectrumDist_d.argtypes = \
lib.ElHermitianUniformSpectrumDist_z.argtypes = \
[c_void_p,iType,dType,dType]
def HermitianUniformSpectrum(A,n,lower=0,upper=1):
args = [A.obj,n,lower,upper]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHermitianUniformSpectrum_s(*args)
elif A.tag == dTag: lib.ElHermitianUniformSpectrum_d(*args)
elif A.tag == cTag: lib.ElHermitianUniformSpectrum_c(*args)
elif A.tag == zTag: lib.ElHermitianUniformSpectrum_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHermitianUniformSpectrumDist_s(*args)
elif A.tag == dTag: lib.ElHermitianUniformSpectrumDist_d(*args)
elif A.tag == cTag: lib.ElHermitianUniformSpectrumDist_c(*args)
elif A.tag == zTag: lib.ElHermitianUniformSpectrumDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Hilbert
# -------
lib.ElHilbert_s.argtypes = \
lib.ElHilbert_d.argtypes = \
lib.ElHilbert_c.argtypes = \
lib.ElHilbert_z.argtypes = \
lib.ElHilbertDist_s.argtypes = \
lib.ElHilbertDist_d.argtypes = \
lib.ElHilbertDist_c.argtypes = \
lib.ElHilbertDist_z.argtypes = \
[c_void_p,iType]
def Hilbert(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElHilbert_s(*args)
elif A.tag == dTag: lib.ElHilbert_d(*args)
elif A.tag == cTag: lib.ElHilbert_c(*args)
elif A.tag == zTag: lib.ElHilbert_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElHilbertDist_s(*args)
elif A.tag == dTag: lib.ElHilbertDist_d(*args)
elif A.tag == cTag: lib.ElHilbertDist_c(*args)
elif A.tag == zTag: lib.ElHilbertDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Identity
# --------
lib.ElIdentity_i.argtypes = \
lib.ElIdentity_s.argtypes = \
lib.ElIdentity_d.argtypes = \
lib.ElIdentity_c.argtypes = \
lib.ElIdentity_z.argtypes = \
lib.ElIdentityDist_i.argtypes = \
lib.ElIdentityDist_s.argtypes = \
lib.ElIdentityDist_d.argtypes = \
lib.ElIdentityDist_c.argtypes = \
lib.ElIdentityDist_z.argtypes = \
lib.ElIdentitySparse_i.argtypes = \
lib.ElIdentitySparse_s.argtypes = \
lib.ElIdentitySparse_d.argtypes = \
lib.ElIdentitySparse_c.argtypes = \
lib.ElIdentitySparse_z.argtypes = \
lib.ElIdentityDistSparse_i.argtypes = \
lib.ElIdentityDistSparse_s.argtypes = \
lib.ElIdentityDistSparse_d.argtypes = \
lib.ElIdentityDistSparse_c.argtypes = \
lib.ElIdentityDistSparse_z.argtypes = \
[c_void_p,iType,iType]
def Identity(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElIdentity_i(*args)
elif A.tag == sTag: lib.ElIdentity_s(*args)
elif A.tag == dTag: lib.ElIdentity_d(*args)
elif A.tag == cTag: lib.ElIdentity_c(*args)
elif A.tag == zTag: lib.ElIdentity_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElIdentityDist_i(*args)
elif A.tag == sTag: lib.ElIdentityDist_s(*args)
elif A.tag == dTag: lib.ElIdentityDist_d(*args)
elif A.tag == cTag: lib.ElIdentityDist_c(*args)
elif A.tag == zTag: lib.ElIdentityDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElIdentitySparse_i(*args)
elif A.tag == sTag: lib.ElIdentitySparse_s(*args)
elif A.tag == dTag: lib.ElIdentitySparse_d(*args)
elif A.tag == cTag: lib.ElIdentitySparse_c(*args)
elif A.tag == zTag: lib.ElIdentitySparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElIdentityDistSparse_i(*args)
elif A.tag == sTag: lib.ElIdentityDistSparse_s(*args)
elif A.tag == dTag: lib.ElIdentityDistSparse_d(*args)
elif A.tag == cTag: lib.ElIdentityDistSparse_c(*args)
elif A.tag == zTag: lib.ElIdentityDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# Jordan
# ------
lib.ElJordan_i.argtypes = \
lib.ElJordanDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElJordan_s.argtypes = \
lib.ElJordanDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElJordan_d.argtypes = \
lib.ElJordanDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElJordan_c.argtypes = \
lib.ElJordanDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElJordan_z.argtypes = \
lib.ElJordanDist_z.argtypes = \
[c_void_p,iType,zType]
def Jordan(J,n,lambPre):
lamb = TagToType(J.tag)(lambPre)
args = [J.obj,n,lamb]
if type(J) is Matrix:
if J.tag == iTag: lib.ElJordan_i(*args)
elif J.tag == sTag: lib.ElJordan_s(*args)
elif J.tag == dTag: lib.ElJordan_d(*args)
elif J.tag == cTag: lib.ElJordan_c(*args)
elif J.tag == zTag: lib.ElJordan_z(*args)
else: DataExcept()
elif type(J) is DistMatrix:
if J.tag == iTag: lib.ElJordanDist_i(*args)
elif J.tag == sTag: lib.ElJordanDist_s(*args)
elif J.tag == dTag: lib.ElJordanDist_d(*args)
elif J.tag == cTag: lib.ElJordanDist_c(*args)
elif J.tag == zTag: lib.ElJordanDist_z(*args)
else: DataExcept()
else: TypeExcept()
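# Example (sketch; assumes Matrix and dTag from El.core):
#
#   J = Matrix(dTag)
#   Jordan(J, 5, 2.0)  # 5 x 5 Jordan block with eigenvalue 2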
# Jordan-Cholesky
# ---------------
lib.ElJordanCholesky_s.argtypes = \
lib.ElJordanCholesky_d.argtypes = \
lib.ElJordanCholesky_c.argtypes = \
lib.ElJordanCholesky_z.argtypes = \
lib.ElJordanCholeskyDist_s.argtypes = \
lib.ElJordanCholeskyDist_d.argtypes = \
lib.ElJordanCholeskyDist_c.argtypes = \
lib.ElJordanCholeskyDist_z.argtypes = \
lib.ElJordanCholeskySparse_s.argtypes = \
lib.ElJordanCholeskySparse_d.argtypes = \
lib.ElJordanCholeskySparse_c.argtypes = \
lib.ElJordanCholeskySparse_z.argtypes = \
lib.ElJordanCholeskyDistSparse_s.argtypes = \
lib.ElJordanCholeskyDistSparse_d.argtypes = \
lib.ElJordanCholeskyDistSparse_c.argtypes = \
lib.ElJordanCholeskyDistSparse_z.argtypes = \
[c_void_p,iType]
def JordanCholesky(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElJordanCholesky_s(*args)
elif A.tag == dTag: lib.ElJordanCholesky_d(*args)
elif A.tag == cTag: lib.ElJordanCholesky_c(*args)
elif A.tag == zTag: lib.ElJordanCholesky_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElJordanCholeskyDist_s(*args)
elif A.tag == dTag: lib.ElJordanCholeskyDist_d(*args)
elif A.tag == cTag: lib.ElJordanCholeskyDist_c(*args)
elif A.tag == zTag: lib.ElJordanCholeskyDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == sTag: lib.ElJordanCholeskySparse_s(*args)
elif A.tag == dTag: lib.ElJordanCholeskySparse_d(*args)
elif A.tag == cTag: lib.ElJordanCholeskySparse_c(*args)
elif A.tag == zTag: lib.ElJordanCholeskySparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == sTag: lib.ElJordanCholeskyDistSparse_s(*args)
elif A.tag == dTag: lib.ElJordanCholeskyDistSparse_d(*args)
elif A.tag == cTag: lib.ElJordanCholeskyDistSparse_c(*args)
elif A.tag == zTag: lib.ElJordanCholeskyDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# Kahan
# -----
lib.ElKahan_s.argtypes = \
lib.ElKahanDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElKahan_d.argtypes = \
lib.ElKahanDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElKahan_c.argtypes = \
lib.ElKahanDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElKahan_z.argtypes = \
lib.ElKahanDist_z.argtypes = \
[c_void_p,iType,zType]
def Kahan(A,n,phi):
args = [A.obj,n,phi]
if type(A) is Matrix:
if A.tag == sTag: lib.ElKahan_s(*args)
elif A.tag == dTag: lib.ElKahan_d(*args)
elif A.tag == cTag: lib.ElKahan_c(*args)
elif A.tag == zTag: lib.ElKahan_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElKahanDist_s(*args)
elif A.tag == dTag: lib.ElKahanDist_d(*args)
elif A.tag == cTag: lib.ElKahanDist_c(*args)
elif A.tag == zTag: lib.ElKahanDist_z(*args)
else: DataExcept()
else: TypeExcept()
# KMS
# ---
lib.ElKMS_i.argtypes = \
lib.ElKMSDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElKMS_s.argtypes = \
lib.ElKMSDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElKMS_d.argtypes = \
lib.ElKMSDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElKMS_c.argtypes = \
lib.ElKMSDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElKMS_z.argtypes = \
lib.ElKMSDist_z.argtypes = \
[c_void_p,iType,zType]
def KMS(K,n,rho):
args = [K.obj,n,rho]
if type(K) is Matrix:
if K.tag == iTag: lib.ElKMS_i(*args)
elif K.tag == sTag: lib.ElKMS_s(*args)
elif K.tag == dTag: lib.ElKMS_d(*args)
elif K.tag == cTag: lib.ElKMS_c(*args)
elif K.tag == zTag: lib.ElKMS_z(*args)
else: DataExcept()
elif type(K) is DistMatrix:
if K.tag == iTag: lib.ElKMSDist_i(*args)
elif K.tag == sTag: lib.ElKMSDist_s(*args)
elif K.tag == dTag: lib.ElKMSDist_d(*args)
elif K.tag == cTag: lib.ElKMSDist_c(*args)
elif K.tag == zTag: lib.ElKMSDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Laplacian
# ---------
lib.ElLaplacian1D_s.argtypes = \
lib.ElLaplacian1D_d.argtypes = \
lib.ElLaplacian1D_c.argtypes = \
lib.ElLaplacian1D_z.argtypes = \
lib.ElLaplacian1DDist_s.argtypes = \
lib.ElLaplacian1DDist_d.argtypes = \
lib.ElLaplacian1DDist_c.argtypes = \
lib.ElLaplacian1DDist_z.argtypes = \
lib.ElLaplacian1DSparse_s.argtypes = \
lib.ElLaplacian1DSparse_d.argtypes = \
lib.ElLaplacian1DSparse_c.argtypes = \
lib.ElLaplacian1DSparse_z.argtypes = \
lib.ElLaplacian1DDistSparse_s.argtypes = \
lib.ElLaplacian1DDistSparse_d.argtypes = \
lib.ElLaplacian1DDistSparse_c.argtypes = \
lib.ElLaplacian1DDistSparse_z.argtypes = \
[c_void_p,iType]
def Laplacian1D(L,nx):
args = [L.obj,nx]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLaplacian1D_s(*args)
elif L.tag == dTag: lib.ElLaplacian1D_d(*args)
elif L.tag == cTag: lib.ElLaplacian1D_c(*args)
elif L.tag == zTag: lib.ElLaplacian1D_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLaplacian1DDist_s(*args)
elif L.tag == dTag: lib.ElLaplacian1DDist_d(*args)
elif L.tag == cTag: lib.ElLaplacian1DDist_c(*args)
elif L.tag == zTag: lib.ElLaplacian1DDist_z(*args)
else: DataExcept()
elif type(L) is SparseMatrix:
if L.tag == sTag: lib.ElLaplacian1DSparse_s(*args)
elif L.tag == dTag: lib.ElLaplacian1DSparse_d(*args)
elif L.tag == cTag: lib.ElLaplacian1DSparse_c(*args)
elif L.tag == zTag: lib.ElLaplacian1DSparse_z(*args)
else: DataExcept()
elif type(L) is DistSparseMatrix:
if L.tag == sTag: lib.ElLaplacian1DDistSparse_s(*args)
elif L.tag == dTag: lib.ElLaplacian1DDistSparse_d(*args)
elif L.tag == cTag: lib.ElLaplacian1DDistSparse_c(*args)
elif L.tag == zTag: lib.ElLaplacian1DDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# LEFT OFF HERE (TODO: Add sparse wrappers)
lib.ElLaplacian2D_s.argtypes = \
lib.ElLaplacian2D_d.argtypes = \
lib.ElLaplacian2D_c.argtypes = \
lib.ElLaplacian2D_z.argtypes = \
lib.ElLaplacian2DDist_s.argtypes = \
lib.ElLaplacian2DDist_d.argtypes = \
lib.ElLaplacian2DDist_c.argtypes = \
lib.ElLaplacian2DDist_z.argtypes = \
[c_void_p,iType,iType]
def Laplacian2D(L,nx,ny):
args = [L.obj,nx,ny]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLaplacian2D_s(*args)
elif L.tag == dTag: lib.ElLaplacian2D_d(*args)
elif L.tag == cTag: lib.ElLaplacian2D_c(*args)
elif L.tag == zTag: lib.ElLaplacian2D_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLaplacian2DDist_s(*args)
elif L.tag == dTag: lib.ElLaplacian2DDist_d(*args)
elif L.tag == cTag: lib.ElLaplacian2DDist_c(*args)
elif L.tag == zTag: lib.ElLaplacian2DDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElLaplacian3D_s.argtypes = \
lib.ElLaplacian3D_d.argtypes = \
lib.ElLaplacian3D_c.argtypes = \
lib.ElLaplacian3D_z.argtypes = \
lib.ElLaplacian3DDist_s.argtypes = \
lib.ElLaplacian3DDist_d.argtypes = \
lib.ElLaplacian3DDist_c.argtypes = \
lib.ElLaplacian3DDist_z.argtypes = \
[c_void_p,iType,iType,iType]
def Laplacian3D(L,nx,ny,nz):
args = [L.obj,nx,ny,nz]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLaplacian3D_s(*args)
elif L.tag == dTag: lib.ElLaplacian3D_d(*args)
elif L.tag == cTag: lib.ElLaplacian3D_c(*args)
elif L.tag == zTag: lib.ElLaplacian3D_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLaplacian3DDist_s(*args)
elif L.tag == dTag: lib.ElLaplacian3DDist_d(*args)
elif L.tag == cTag: lib.ElLaplacian3DDist_c(*args)
elif L.tag == zTag: lib.ElLaplacian3DDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Lauchli
# -------
lib.ElLauchli_i.argtypes = \
lib.ElLauchliDist_i.argtypes = \
[c_void_p,iType,iType]
lib.ElLauchli_s.argtypes = \
lib.ElLauchliDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElLauchli_d.argtypes = \
lib.ElLauchliDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElLauchli_c.argtypes = \
lib.ElLauchliDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElLauchli_z.argtypes = \
lib.ElLauchliDist_z.argtypes = \
[c_void_p,iType,zType]
def Lauchli(A,n,mu):
args = [A.obj,n,mu]
if type(A) is Matrix:
if A.tag == iTag: lib.ElLauchli_i(*args)
elif A.tag == sTag: lib.ElLauchli_s(*args)
elif A.tag == dTag: lib.ElLauchli_d(*args)
elif A.tag == cTag: lib.ElLauchli_c(*args)
elif A.tag == zTag: lib.ElLauchli_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElLauchliDist_i(*args)
elif A.tag == sTag: lib.ElLauchliDist_s(*args)
elif A.tag == dTag: lib.ElLauchliDist_d(*args)
elif A.tag == cTag: lib.ElLauchliDist_c(*args)
elif A.tag == zTag: lib.ElLauchliDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Legendre
# --------
lib.ElLegendre_s.argtypes = \
lib.ElLegendre_d.argtypes = \
lib.ElLegendre_c.argtypes = \
lib.ElLegendre_z.argtypes = \
lib.ElLegendreDist_s.argtypes = \
lib.ElLegendreDist_d.argtypes = \
lib.ElLegendreDist_c.argtypes = \
lib.ElLegendreDist_z.argtypes = \
[c_void_p,iType]
def Legendre(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElLegendre_s(*args)
elif A.tag == dTag: lib.ElLegendre_d(*args)
elif A.tag == cTag: lib.ElLegendre_c(*args)
elif A.tag == zTag: lib.ElLegendre_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElLegendreDist_s(*args)
elif A.tag == dTag: lib.ElLegendreDist_d(*args)
elif A.tag == cTag: lib.ElLegendreDist_c(*args)
elif A.tag == zTag: lib.ElLegendreDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Lehmer
# ------
lib.ElLehmer_s.argtypes = \
lib.ElLehmer_d.argtypes = \
lib.ElLehmer_c.argtypes = \
lib.ElLehmer_z.argtypes = \
lib.ElLehmerDist_s.argtypes = \
lib.ElLehmerDist_d.argtypes = \
lib.ElLehmerDist_c.argtypes = \
lib.ElLehmerDist_z.argtypes = \
[c_void_p,iType]
def Lehmer(L,n):
args = [L.obj,n]
if type(L) is Matrix:
if L.tag == sTag: lib.ElLehmer_s(*args)
elif L.tag == dTag: lib.ElLehmer_d(*args)
elif L.tag == cTag: lib.ElLehmer_c(*args)
elif L.tag == zTag: lib.ElLehmer_z(*args)
else: DataExcept()
elif type(L) is DistMatrix:
if L.tag == sTag: lib.ElLehmerDist_s(*args)
elif L.tag == dTag: lib.ElLehmerDist_d(*args)
elif L.tag == cTag: lib.ElLehmerDist_c(*args)
elif L.tag == zTag: lib.ElLehmerDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Lotkin
# ------
lib.ElLotkin_s.argtypes = \
lib.ElLotkin_d.argtypes = \
lib.ElLotkin_c.argtypes = \
lib.ElLotkin_z.argtypes = \
lib.ElLotkinDist_s.argtypes = \
lib.ElLotkinDist_d.argtypes = \
lib.ElLotkinDist_c.argtypes = \
lib.ElLotkinDist_z.argtypes = \
[c_void_p,iType]
def Lotkin(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElLotkin_s(*args)
elif A.tag == dTag: lib.ElLotkin_d(*args)
elif A.tag == cTag: lib.ElLotkin_c(*args)
elif A.tag == zTag: lib.ElLotkin_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElLotkinDist_s(*args)
elif A.tag == dTag: lib.ElLotkinDist_d(*args)
elif A.tag == cTag: lib.ElLotkinDist_c(*args)
elif A.tag == zTag: lib.ElLotkinDist_z(*args)
else: DataExcept()
else: TypeExcept()
# MinIJ
# -----
lib.ElMinIJ_i.argtypes = \
lib.ElMinIJ_s.argtypes = \
lib.ElMinIJ_d.argtypes = \
lib.ElMinIJ_c.argtypes = \
lib.ElMinIJ_z.argtypes = \
lib.ElMinIJDist_i.argtypes = \
lib.ElMinIJDist_s.argtypes = \
lib.ElMinIJDist_d.argtypes = \
lib.ElMinIJDist_c.argtypes = \
lib.ElMinIJDist_z.argtypes = \
[c_void_p,iType]
def MinIJ(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElMinIJ_i(*args)
elif A.tag == sTag: lib.ElMinIJ_s(*args)
elif A.tag == dTag: lib.ElMinIJ_d(*args)
elif A.tag == cTag: lib.ElMinIJ_c(*args)
elif A.tag == zTag: lib.ElMinIJ_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElMinIJDist_i(*args)
elif A.tag == sTag: lib.ElMinIJDist_s(*args)
elif A.tag == dTag: lib.ElMinIJDist_d(*args)
elif A.tag == cTag: lib.ElMinIJDist_c(*args)
elif A.tag == zTag: lib.ElMinIJDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Normal from EVD
# ---------------
lib.ElNormalFromEVD_c.argtypes = \
lib.ElNormalFromEVD_z.argtypes = \
lib.ElNormalFromEVDDist_c.argtypes = \
lib.ElNormalFromEVDDist_z.argtypes = \
[c_void_p,c_void_p,c_void_p]
def NormalFromEVD(A,w,Z):
if type(A) is not type(w): raise Exception('Types of A and w must match')
if type(A) is not type(Z): raise Exception('Types of A and Z must match')
if Z.tag != A.tag: raise Exception('Datatypes of A and Z must match')
if w.tag != Base(A.tag): raise Exception('Base datatype of A must match w')
args = [A.obj,w.obj,Z.obj]
if type(A) is Matrix:
if A.tag == cTag: lib.ElNormalFromEVD_c(*args)
elif A.tag == zTag: lib.ElNormalFromEVD_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElNormalFromEVDDist_c(*args)
elif A.tag == zTag: lib.ElNormalFromEVDDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Ones
# ----
lib.ElOnes_i.argtypes = \
lib.ElOnes_s.argtypes = \
lib.ElOnes_d.argtypes = \
lib.ElOnes_c.argtypes = \
lib.ElOnes_z.argtypes = \
lib.ElOnesDist_i.argtypes = \
lib.ElOnesDist_s.argtypes = \
lib.ElOnesDist_d.argtypes = \
lib.ElOnesDist_c.argtypes = \
lib.ElOnesDist_z.argtypes = \
lib.ElOnesDistMultiVec_i.argtypes = \
lib.ElOnesDistMultiVec_s.argtypes = \
lib.ElOnesDistMultiVec_d.argtypes = \
lib.ElOnesDistMultiVec_c.argtypes = \
lib.ElOnesDistMultiVec_z.argtypes = \
lib.ElOnesSparse_i.argtypes = \
lib.ElOnesSparse_s.argtypes = \
lib.ElOnesSparse_d.argtypes = \
lib.ElOnesSparse_c.argtypes = \
lib.ElOnesSparse_z.argtypes = \
lib.ElOnesDistSparse_i.argtypes = \
lib.ElOnesDistSparse_s.argtypes = \
lib.ElOnesDistSparse_d.argtypes = \
lib.ElOnesDistSparse_c.argtypes = \
lib.ElOnesDistSparse_z.argtypes = \
[c_void_p,iType,iType]
def Ones(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElOnes_i(*args)
elif A.tag == sTag: lib.ElOnes_s(*args)
elif A.tag == dTag: lib.ElOnes_d(*args)
elif A.tag == cTag: lib.ElOnes_c(*args)
elif A.tag == zTag: lib.ElOnes_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElOnesDist_i(*args)
elif A.tag == sTag: lib.ElOnesDist_s(*args)
elif A.tag == dTag: lib.ElOnesDist_d(*args)
elif A.tag == cTag: lib.ElOnesDist_c(*args)
elif A.tag == zTag: lib.ElOnesDist_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElOnesDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElOnesDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElOnesDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElOnesDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElOnesDistMultiVec_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElOnesSparse_i(*args)
elif A.tag == sTag: lib.ElOnesSparse_s(*args)
elif A.tag == dTag: lib.ElOnesSparse_d(*args)
elif A.tag == cTag: lib.ElOnesSparse_c(*args)
elif A.tag == zTag: lib.ElOnesSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElOnesDistSparse_i(*args)
elif A.tag == sTag: lib.ElOnesDistSparse_s(*args)
elif A.tag == dTag: lib.ElOnesDistSparse_d(*args)
elif A.tag == cTag: lib.ElOnesDistSparse_c(*args)
elif A.tag == zTag: lib.ElOnesDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
# 1-2-1 matrix
# ------------
lib.ElOneTwoOne_i.argtypes = \
lib.ElOneTwoOne_s.argtypes = \
lib.ElOneTwoOne_d.argtypes = \
lib.ElOneTwoOne_c.argtypes = \
lib.ElOneTwoOne_z.argtypes = \
lib.ElOneTwoOneDist_i.argtypes = \
lib.ElOneTwoOneDist_s.argtypes = \
lib.ElOneTwoOneDist_d.argtypes = \
lib.ElOneTwoOneDist_c.argtypes = \
lib.ElOneTwoOneDist_z.argtypes = \
[c_void_p,iType]
def OneTwoOne(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElOneTwoOne_i(*args)
elif A.tag == sTag: lib.ElOneTwoOne_s(*args)
elif A.tag == dTag: lib.ElOneTwoOne_d(*args)
elif A.tag == cTag: lib.ElOneTwoOne_c(*args)
elif A.tag == zTag: lib.ElOneTwoOne_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElOneTwoOneDist_i(*args)
elif A.tag == sTag: lib.ElOneTwoOneDist_s(*args)
elif A.tag == dTag: lib.ElOneTwoOneDist_d(*args)
elif A.tag == cTag: lib.ElOneTwoOneDist_c(*args)
elif A.tag == zTag: lib.ElOneTwoOneDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Parter
# ------
lib.ElParter_s.argtypes = \
lib.ElParter_d.argtypes = \
lib.ElParter_c.argtypes = \
lib.ElParter_z.argtypes = \
lib.ElParterDist_s.argtypes = \
lib.ElParterDist_d.argtypes = \
lib.ElParterDist_c.argtypes = \
lib.ElParterDist_z.argtypes = \
[c_void_p,iType]
def Parter(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElParter_s(*args)
elif A.tag == dTag: lib.ElParter_d(*args)
elif A.tag == cTag: lib.ElParter_c(*args)
elif A.tag == zTag: lib.ElParter_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElParterDist_s(*args)
elif A.tag == dTag: lib.ElParterDist_d(*args)
elif A.tag == cTag: lib.ElParterDist_c(*args)
elif A.tag == zTag: lib.ElParterDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Pei
# ---
lib.ElPei_s.argtypes = \
lib.ElPeiDist_s.argtypes = \
[c_void_p,iType,sType]
lib.ElPei_d.argtypes = \
lib.ElPeiDist_d.argtypes = \
[c_void_p,iType,dType]
lib.ElPei_c.argtypes = \
lib.ElPeiDist_c.argtypes = \
[c_void_p,iType,cType]
lib.ElPei_z.argtypes = \
lib.ElPeiDist_z.argtypes = \
[c_void_p,iType,zType]
def Pei(A,n,alpha):
args = [A.obj,n,alpha]
  # complex branches restored to match the _c/_z argtypes declared above
  if type(A) is Matrix:
    if A.tag == sTag: lib.ElPei_s(*args)
    elif A.tag == dTag: lib.ElPei_d(*args)
    elif A.tag == cTag: lib.ElPei_c(*args)
    elif A.tag == zTag: lib.ElPei_z(*args)
    else: DataExcept()
  elif type(A) is DistMatrix:
    if A.tag == sTag: lib.ElPeiDist_s(*args)
    elif A.tag == dTag: lib.ElPeiDist_d(*args)
    elif A.tag == cTag: lib.ElPeiDist_c(*args)
    elif A.tag == zTag: lib.ElPeiDist_z(*args)
    else: DataExcept()
  else: TypeExcept()
# Redheffer
# ---------
lib.ElRedheffer_i.argtypes = \
lib.ElRedheffer_s.argtypes = \
lib.ElRedheffer_d.argtypes = \
lib.ElRedheffer_c.argtypes = \
lib.ElRedheffer_z.argtypes = \
lib.ElRedhefferDist_i.argtypes = \
lib.ElRedhefferDist_s.argtypes = \
lib.ElRedhefferDist_d.argtypes = \
lib.ElRedhefferDist_c.argtypes = \
lib.ElRedhefferDist_z.argtypes = \
[c_void_p,iType]
def Redheffer(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElRedheffer_i(*args)
elif A.tag == sTag: lib.ElRedheffer_s(*args)
elif A.tag == dTag: lib.ElRedheffer_d(*args)
elif A.tag == cTag: lib.ElRedheffer_c(*args)
elif A.tag == zTag: lib.ElRedheffer_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElRedhefferDist_i(*args)
elif A.tag == sTag: lib.ElRedhefferDist_s(*args)
elif A.tag == dTag: lib.ElRedhefferDist_d(*args)
elif A.tag == cTag: lib.ElRedhefferDist_c(*args)
elif A.tag == zTag: lib.ElRedhefferDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Riffle
# ------
lib.ElRiffle_s.argtypes = \
lib.ElRiffle_d.argtypes = \
lib.ElRiffle_c.argtypes = \
lib.ElRiffle_z.argtypes = \
lib.ElRiffleDist_s.argtypes = \
lib.ElRiffleDist_d.argtypes = \
lib.ElRiffleDist_c.argtypes = \
lib.ElRiffleDist_z.argtypes = \
[c_void_p,iType]
def Riffle(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElRiffle_s(*args)
elif P.tag == dTag: lib.ElRiffle_d(*args)
elif P.tag == cTag: lib.ElRiffle_c(*args)
elif P.tag == zTag: lib.ElRiffle_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElRiffleDist_s(*args)
elif P.tag == dTag: lib.ElRiffleDist_d(*args)
elif P.tag == cTag: lib.ElRiffleDist_c(*args)
elif P.tag == zTag: lib.ElRiffleDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElRiffleStationary_s.argtypes = \
lib.ElRiffleStationary_d.argtypes = \
lib.ElRiffleStationary_c.argtypes = \
lib.ElRiffleStationary_z.argtypes = \
lib.ElRiffleStationaryDist_s.argtypes = \
lib.ElRiffleStationaryDist_d.argtypes = \
lib.ElRiffleStationaryDist_c.argtypes = \
lib.ElRiffleStationaryDist_z.argtypes = \
[c_void_p,iType]
def RiffleStationary(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElRiffleStationary_s(*args)
elif P.tag == dTag: lib.ElRiffleStationary_d(*args)
elif P.tag == cTag: lib.ElRiffleStationary_c(*args)
elif P.tag == zTag: lib.ElRiffleStationary_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElRiffleStationaryDist_s(*args)
elif P.tag == dTag: lib.ElRiffleStationaryDist_d(*args)
elif P.tag == cTag: lib.ElRiffleStationaryDist_c(*args)
elif P.tag == zTag: lib.ElRiffleStationaryDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElRiffleDecay_s.argtypes = \
lib.ElRiffleDecay_d.argtypes = \
lib.ElRiffleDecay_c.argtypes = \
lib.ElRiffleDecay_z.argtypes = \
lib.ElRiffleDecayDist_s.argtypes = \
lib.ElRiffleDecayDist_d.argtypes = \
lib.ElRiffleDecayDist_c.argtypes = \
lib.ElRiffleDecayDist_z.argtypes = \
[c_void_p,iType]
def RiffleDecay(P,n):
args = [P.obj,n]
if type(P) is Matrix:
if P.tag == sTag: lib.ElRiffleDecay_s(*args)
elif P.tag == dTag: lib.ElRiffleDecay_d(*args)
elif P.tag == cTag: lib.ElRiffleDecay_c(*args)
elif P.tag == zTag: lib.ElRiffleDecay_z(*args)
else: DataExcept()
elif type(P) is DistMatrix:
if P.tag == sTag: lib.ElRiffleDecayDist_s(*args)
elif P.tag == dTag: lib.ElRiffleDecayDist_d(*args)
elif P.tag == cTag: lib.ElRiffleDecayDist_c(*args)
elif P.tag == zTag: lib.ElRiffleDecayDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Ris
# ---
lib.ElRis_s.argtypes = \
lib.ElRis_d.argtypes = \
lib.ElRis_c.argtypes = \
lib.ElRis_z.argtypes = \
lib.ElRisDist_s.argtypes = \
lib.ElRisDist_d.argtypes = \
lib.ElRisDist_c.argtypes = \
lib.ElRisDist_z.argtypes = \
[c_void_p,iType]
def Ris(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == sTag: lib.ElRis_s(*args)
elif A.tag == dTag: lib.ElRis_d(*args)
elif A.tag == cTag: lib.ElRis_c(*args)
elif A.tag == zTag: lib.ElRis_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElRisDist_s(*args)
elif A.tag == dTag: lib.ElRisDist_d(*args)
elif A.tag == cTag: lib.ElRisDist_c(*args)
elif A.tag == zTag: lib.ElRisDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Toeplitz
# --------
lib.ElToeplitz_i.argtypes = [c_void_p,iType,iType,iType,POINTER(iType)]
lib.ElToeplitzDist_i.argtypes = [c_void_p,iType,iType,iType,POINTER(iType)]
lib.ElToeplitz_s.argtypes = [c_void_p,iType,iType,iType,POINTER(sType)]
lib.ElToeplitzDist_s.argtypes = [c_void_p,iType,iType,iType,POINTER(sType)]
lib.ElToeplitz_d.argtypes = [c_void_p,iType,iType,iType,POINTER(dType)]
lib.ElToeplitzDist_d.argtypes = [c_void_p,iType,iType,iType,POINTER(dType)]
lib.ElToeplitz_c.argtypes = [c_void_p,iType,iType,iType,POINTER(cType)]
lib.ElToeplitzDist_c.argtypes = [c_void_p,iType,iType,iType,POINTER(cType)]
lib.ElToeplitz_z.argtypes = [c_void_p,iType,iType,iType,POINTER(zType)]
lib.ElToeplitzDist_z.argtypes = [c_void_p,iType,iType,iType,POINTER(zType)]
def Toeplitz(A,m,n,a):
aLen = len(a)
aBuf = (TagToType(A.tag)*aLen)(*a)
args = [A.obj,m,n,aLen,aBuf]
if type(A) is Matrix:
if A.tag == iTag: lib.ElToeplitz_i(*args)
elif A.tag == sTag: lib.ElToeplitz_s(*args)
elif A.tag == dTag: lib.ElToeplitz_d(*args)
elif A.tag == cTag: lib.ElToeplitz_c(*args)
elif A.tag == zTag: lib.ElToeplitz_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElToeplitzDist_i(*args)
elif A.tag == sTag: lib.ElToeplitzDist_s(*args)
elif A.tag == dTag: lib.ElToeplitzDist_d(*args)
elif A.tag == cTag: lib.ElToeplitzDist_c(*args)
elif A.tag == zTag: lib.ElToeplitzDist_z(*args)
else: DataExcept()
else: TypeExcept()
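# Illustrative sketch (not from the original source): an m x n Toeplitz matrix
# has m+n-1 distinct diagonals, so `a` should supply that many entries, e.g.
#   A = Matrix(dTag)
#   Toeplitz(A, 3, 3, [1.,2.,3.,4.,5.])  # 3+3-1 = 5 diagonal values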
# Trefethen-Embree
# ----------------
lib.ElTrefethenEmbree_c.argtypes = \
lib.ElTrefethenEmbree_z.argtypes = \
lib.ElTrefethenEmbreeDist_c.argtypes = \
lib.ElTrefethenEmbreeDist_z.argtypes = \
[c_void_p,iType]
def TrefethenEmbree(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElTrefethenEmbree_c(*args)
elif A.tag == zTag: lib.ElTrefethenEmbree_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElTrefethenEmbreeDist_c(*args)
elif A.tag == zTag: lib.ElTrefethenEmbreeDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Triangle
# --------
lib.ElTriangle_c.argtypes = \
lib.ElTriangle_z.argtypes = \
lib.ElTriangleDist_c.argtypes = \
lib.ElTriangleDist_z.argtypes = \
[c_void_p,iType]
def Triangle(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElTriangle_c(*args)
elif A.tag == zTag: lib.ElTriangle_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElTriangleDist_c(*args)
elif A.tag == zTag: lib.ElTriangleDist_z(*args)
else: DataExcept()
else: TypeExcept()
# TriW
# ----
lib.ElTriW_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElTriW_s.argtypes = [c_void_p,iType,sType,iType]
lib.ElTriW_d.argtypes = [c_void_p,iType,dType,iType]
lib.ElTriW_c.argtypes = [c_void_p,iType,cType,iType]
lib.ElTriW_z.argtypes = [c_void_p,iType,zType,iType]
lib.ElTriWDist_i.argtypes = [c_void_p,iType,iType,iType]
lib.ElTriWDist_s.argtypes = [c_void_p,iType,sType,iType]
lib.ElTriWDist_d.argtypes = [c_void_p,iType,dType,iType]
lib.ElTriWDist_c.argtypes = [c_void_p,iType,cType,iType]
lib.ElTriWDist_z.argtypes = [c_void_p,iType,zType,iType]
def TriW(A,n,alpha,k):
args = [A.obj,n,alpha,k]
if type(A) is Matrix:
if A.tag == iTag: lib.ElTriW_i(*args)
elif A.tag == sTag: lib.ElTriW_s(*args)
elif A.tag == dTag: lib.ElTriW_d(*args)
elif A.tag == cTag: lib.ElTriW_c(*args)
elif A.tag == zTag: lib.ElTriW_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElTriWDist_i(*args)
elif A.tag == sTag: lib.ElTriWDist_s(*args)
elif A.tag == dTag: lib.ElTriWDist_d(*args)
elif A.tag == cTag: lib.ElTriWDist_c(*args)
elif A.tag == zTag: lib.ElTriWDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Walsh
# -----
lib.ElWalsh_i.argtypes = \
lib.ElWalsh_s.argtypes = \
lib.ElWalsh_d.argtypes = \
lib.ElWalsh_c.argtypes = \
lib.ElWalsh_z.argtypes = \
lib.ElWalshDist_i.argtypes = \
lib.ElWalshDist_s.argtypes = \
lib.ElWalshDist_d.argtypes = \
lib.ElWalshDist_c.argtypes = \
lib.ElWalshDist_z.argtypes = \
[c_void_p,iType,bType]
def Walsh(A,k,binary=False):
args = [A.obj,k,binary]
if type(A) is Matrix:
if A.tag == iTag: lib.ElWalsh_i(*args)
elif A.tag == sTag: lib.ElWalsh_s(*args)
elif A.tag == dTag: lib.ElWalsh_d(*args)
elif A.tag == cTag: lib.ElWalsh_c(*args)
elif A.tag == zTag: lib.ElWalsh_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElWalshDist_i(*args)
elif A.tag == sTag: lib.ElWalshDist_s(*args)
elif A.tag == dTag: lib.ElWalshDist_d(*args)
elif A.tag == cTag: lib.ElWalshDist_c(*args)
elif A.tag == zTag: lib.ElWalshDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Walsh-Identity
# --------------
lib.ElWalshIdentity_i.argtypes = \
lib.ElWalshIdentity_s.argtypes = \
lib.ElWalshIdentity_d.argtypes = \
lib.ElWalshIdentity_c.argtypes = \
lib.ElWalshIdentity_z.argtypes = \
lib.ElWalshIdentityDist_i.argtypes = \
lib.ElWalshIdentityDist_s.argtypes = \
lib.ElWalshIdentityDist_d.argtypes = \
lib.ElWalshIdentityDist_c.argtypes = \
lib.ElWalshIdentityDist_z.argtypes = \
[c_void_p,iType,bType]
def WalshIdentity(A,k,binary=False):
args = [A.obj,k,binary]
if type(A) is Matrix:
if A.tag == iTag: lib.ElWalshIdentity_i(*args)
elif A.tag == sTag: lib.ElWalshIdentity_s(*args)
elif A.tag == dTag: lib.ElWalshIdentity_d(*args)
elif A.tag == cTag: lib.ElWalshIdentity_c(*args)
elif A.tag == zTag: lib.ElWalshIdentity_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElWalshIdentityDist_i(*args)
elif A.tag == sTag: lib.ElWalshIdentityDist_s(*args)
elif A.tag == dTag: lib.ElWalshIdentityDist_d(*args)
elif A.tag == cTag: lib.ElWalshIdentityDist_c(*args)
elif A.tag == zTag: lib.ElWalshIdentityDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Whale
# -----
lib.ElWhale_c.argtypes = \
lib.ElWhale_z.argtypes = \
lib.ElWhaleDist_c.argtypes = \
lib.ElWhaleDist_z.argtypes = \
[c_void_p,iType]
def Whale(A,n):
args = [A.obj,n]
if type(A) is Matrix:
if A.tag == cTag: lib.ElWhale_c(*args)
elif A.tag == zTag: lib.ElWhale_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElWhaleDist_c(*args)
elif A.tag == zTag: lib.ElWhaleDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Wilkinson
# ---------
lib.ElWilkinson_i.argtypes = \
lib.ElWilkinson_s.argtypes = \
lib.ElWilkinson_d.argtypes = \
lib.ElWilkinson_c.argtypes = \
lib.ElWilkinson_z.argtypes = \
lib.ElWilkinsonDist_i.argtypes = \
lib.ElWilkinsonDist_s.argtypes = \
lib.ElWilkinsonDist_d.argtypes = \
lib.ElWilkinsonDist_c.argtypes = \
lib.ElWilkinsonDist_z.argtypes = \
[c_void_p,iType]
def Wilkinson(A,k):
args = [A.obj,k]
if type(A) is Matrix:
if A.tag == iTag: lib.ElWilkinson_i(*args)
elif A.tag == sTag: lib.ElWilkinson_s(*args)
elif A.tag == dTag: lib.ElWilkinson_d(*args)
elif A.tag == cTag: lib.ElWilkinson_c(*args)
elif A.tag == zTag: lib.ElWilkinson_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElWilkinsonDist_i(*args)
elif A.tag == sTag: lib.ElWilkinsonDist_s(*args)
elif A.tag == dTag: lib.ElWilkinsonDist_d(*args)
elif A.tag == cTag: lib.ElWilkinsonDist_c(*args)
elif A.tag == zTag: lib.ElWilkinsonDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Zeros
# -----
lib.ElZeros_i.argtypes = \
lib.ElZeros_s.argtypes = \
lib.ElZeros_d.argtypes = \
lib.ElZeros_c.argtypes = \
lib.ElZeros_z.argtypes = \
lib.ElZerosDist_i.argtypes = \
lib.ElZerosDist_s.argtypes = \
lib.ElZerosDist_d.argtypes = \
lib.ElZerosDist_c.argtypes = \
lib.ElZerosDist_z.argtypes = \
lib.ElZerosSparse_i.argtypes = \
lib.ElZerosSparse_s.argtypes = \
lib.ElZerosSparse_d.argtypes = \
lib.ElZerosSparse_c.argtypes = \
lib.ElZerosSparse_z.argtypes = \
lib.ElZerosDistSparse_i.argtypes = \
lib.ElZerosDistSparse_s.argtypes = \
lib.ElZerosDistSparse_d.argtypes = \
lib.ElZerosDistSparse_c.argtypes = \
lib.ElZerosDistSparse_z.argtypes = \
lib.ElZerosDistMultiVec_i.argtypes = \
lib.ElZerosDistMultiVec_s.argtypes = \
lib.ElZerosDistMultiVec_d.argtypes = \
lib.ElZerosDistMultiVec_c.argtypes = \
lib.ElZerosDistMultiVec_z.argtypes = \
[c_void_p,iType,iType]
def Zeros(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElZeros_i(*args)
elif A.tag == sTag: lib.ElZeros_s(*args)
elif A.tag == dTag: lib.ElZeros_d(*args)
elif A.tag == cTag: lib.ElZeros_c(*args)
elif A.tag == zTag: lib.ElZeros_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElZerosDist_i(*args)
elif A.tag == sTag: lib.ElZerosDist_s(*args)
elif A.tag == dTag: lib.ElZerosDist_d(*args)
elif A.tag == cTag: lib.ElZerosDist_c(*args)
elif A.tag == zTag: lib.ElZerosDist_z(*args)
else: DataExcept()
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElZerosSparse_i(*args)
elif A.tag == sTag: lib.ElZerosSparse_s(*args)
elif A.tag == dTag: lib.ElZerosSparse_d(*args)
elif A.tag == cTag: lib.ElZerosSparse_c(*args)
elif A.tag == zTag: lib.ElZerosSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElZerosDistSparse_i(*args)
elif A.tag == sTag: lib.ElZerosDistSparse_s(*args)
elif A.tag == dTag: lib.ElZerosDistSparse_d(*args)
elif A.tag == cTag: lib.ElZerosDistSparse_c(*args)
elif A.tag == zTag: lib.ElZerosDistSparse_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElZerosDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElZerosDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElZerosDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElZerosDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElZerosDistMultiVec_z(*args)
else: DataExcept()
else: TypeExcept()
# Random
# ======
# Bernoulli
# ---------
lib.ElBernoulli_i.argtypes = \
lib.ElBernoulli_s.argtypes = \
lib.ElBernoulli_d.argtypes = \
lib.ElBernoulli_c.argtypes = \
lib.ElBernoulli_z.argtypes = \
lib.ElBernoulliDist_i.argtypes = \
lib.ElBernoulliDist_s.argtypes = \
lib.ElBernoulliDist_d.argtypes = \
lib.ElBernoulliDist_c.argtypes = \
lib.ElBernoulliDist_z.argtypes = \
[c_void_p,iType,iType]
def Bernoulli(A,m,n):
args = [A.obj,m,n]
if type(A) is Matrix:
if A.tag == iTag: lib.ElBernoulli_i(*args)
elif A.tag == sTag: lib.ElBernoulli_s(*args)
elif A.tag == dTag: lib.ElBernoulli_d(*args)
elif A.tag == cTag: lib.ElBernoulli_c(*args)
elif A.tag == zTag: lib.ElBernoulli_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElBernoulliDist_i(*args)
elif A.tag == sTag: lib.ElBernoulliDist_s(*args)
elif A.tag == dTag: lib.ElBernoulliDist_d(*args)
elif A.tag == cTag: lib.ElBernoulliDist_c(*args)
elif A.tag == zTag: lib.ElBernoulliDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Gaussian
# --------
lib.ElGaussian_s.argtypes = [c_void_p,iType,iType,sType,sType]
lib.ElGaussian_d.argtypes = [c_void_p,iType,iType,dType,dType]
lib.ElGaussian_c.argtypes = [c_void_p,iType,iType,cType,sType]
lib.ElGaussian_z.argtypes = [c_void_p,iType,iType,zType,dType]
lib.ElGaussianDist_s.argtypes = [c_void_p,iType,iType,sType,sType]
lib.ElGaussianDist_d.argtypes = [c_void_p,iType,iType,dType,dType]
lib.ElGaussianDist_c.argtypes = [c_void_p,iType,iType,cType,sType]
lib.ElGaussianDist_z.argtypes = [c_void_p,iType,iType,zType,dType]
lib.ElGaussianDistMultiVec_s.argtypes = [c_void_p,iType,iType,sType,sType]
lib.ElGaussianDistMultiVec_d.argtypes = [c_void_p,iType,iType,dType,dType]
lib.ElGaussianDistMultiVec_c.argtypes = [c_void_p,iType,iType,cType,sType]
lib.ElGaussianDistMultiVec_z.argtypes = [c_void_p,iType,iType,zType,dType]
def Gaussian(A,m,n,meanPre=0,stddev=1):
mean = TagToType(A.tag)(meanPre)
args = [A.obj,m,n,mean,stddev]
if type(A) is Matrix:
if A.tag == sTag: lib.ElGaussian_s(*args)
elif A.tag == dTag: lib.ElGaussian_d(*args)
elif A.tag == cTag: lib.ElGaussian_c(*args)
elif A.tag == zTag: lib.ElGaussian_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElGaussianDist_s(*args)
elif A.tag == dTag: lib.ElGaussianDist_d(*args)
elif A.tag == cTag: lib.ElGaussianDist_c(*args)
elif A.tag == zTag: lib.ElGaussianDist_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == sTag: lib.ElGaussianDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElGaussianDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElGaussianDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElGaussianDistMultiVec_z(*args)
else: DataExcept()
else: TypeExcept()
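# Minimal usage sketch (assumes this module's Matrix class and type tags):
#   A = Matrix(dTag)
#   Gaussian(A, 100, 50)            # 100 x 50 matrix of standard normal entries
#   Gaussian(A, 100, 50, 1.0, 0.5)  # mean 1.0, standard deviation 0.5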
# Normal uniform spectrum
# -----------------------
lib.ElNormalUniformSpectrum_c.argtypes = [c_void_p,iType,cType,sType]
lib.ElNormalUniformSpectrum_z.argtypes = [c_void_p,iType,zType,dType]
lib.ElNormalUniformSpectrumDist_c.argtypes = [c_void_p,iType,cType,sType]
lib.ElNormalUniformSpectrumDist_z.argtypes = [c_void_p,iType,zType,dType]
def NormalUniformSpectrum(A,n,centerPre=0,radius=1):
center = TagToType(A.tag)(centerPre)
args = [A.obj,n,center,radius]
if type(A) is Matrix:
if A.tag == cTag: lib.ElNormalUniformSpectrum_c(*args)
elif A.tag == zTag: lib.ElNormalUniformSpectrum_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElNormalUniformSpectrumDist_c(*args)
elif A.tag == zTag: lib.ElNormalUniformSpectrumDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Three-valued
# ------------
lib.ElThreeValued_i.argtypes = \
lib.ElThreeValued_s.argtypes = \
lib.ElThreeValued_d.argtypes = \
lib.ElThreeValued_c.argtypes = \
lib.ElThreeValued_z.argtypes = \
lib.ElThreeValuedDist_i.argtypes = \
lib.ElThreeValuedDist_s.argtypes = \
lib.ElThreeValuedDist_d.argtypes = \
lib.ElThreeValuedDist_c.argtypes = \
lib.ElThreeValuedDist_z.argtypes = \
[c_void_p,iType,iType,dType]
def ThreeValued(A,m,n,p=2./3.):
args = [A.obj,m,n,p]
if type(A) is Matrix:
if A.tag == iTag: lib.ElThreeValued_i(*args)
elif A.tag == sTag: lib.ElThreeValued_s(*args)
elif A.tag == dTag: lib.ElThreeValued_d(*args)
elif A.tag == cTag: lib.ElThreeValued_c(*args)
elif A.tag == zTag: lib.ElThreeValued_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElThreeValuedDist_i(*args)
elif A.tag == sTag: lib.ElThreeValuedDist_s(*args)
elif A.tag == dTag: lib.ElThreeValuedDist_d(*args)
elif A.tag == cTag: lib.ElThreeValuedDist_c(*args)
elif A.tag == zTag: lib.ElThreeValuedDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Uniform
# -------
lib.ElUniform_i.argtypes = \
lib.ElUniformDist_i.argtypes = \
lib.ElUniformDistMultiVec_i.argtypes = \
[c_void_p,iType,iType,iType,iType]
lib.ElUniform_s.argtypes = \
lib.ElUniformDist_s.argtypes = \
lib.ElUniformDistMultiVec_s.argtypes = \
[c_void_p,iType,iType,sType,sType]
lib.ElUniform_d.argtypes = \
lib.ElUniformDist_d.argtypes = \
lib.ElUniformDistMultiVec_d.argtypes = \
[c_void_p,iType,iType,dType,dType]
lib.ElUniform_c.argtypes = \
lib.ElUniformDist_c.argtypes = \
lib.ElUniformDistMultiVec_c.argtypes = \
[c_void_p,iType,iType,cType,sType]
lib.ElUniform_z.argtypes = \
lib.ElUniformDist_z.argtypes = \
lib.ElUniformDistMultiVec_z.argtypes = \
[c_void_p,iType,iType,zType,dType]
def Uniform(A,m,n,centerPre=0,radius=1):
center = TagToType(A.tag)(centerPre)
args = [A.obj,m,n,center,radius]
if type(A) is Matrix:
if A.tag == iTag: lib.ElUniform_i(*args)
elif A.tag == sTag: lib.ElUniform_s(*args)
elif A.tag == dTag: lib.ElUniform_d(*args)
elif A.tag == cTag: lib.ElUniform_c(*args)
elif A.tag == zTag: lib.ElUniform_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElUniformDist_i(*args)
elif A.tag == sTag: lib.ElUniformDist_s(*args)
elif A.tag == dTag: lib.ElUniformDist_d(*args)
elif A.tag == cTag: lib.ElUniformDist_c(*args)
elif A.tag == zTag: lib.ElUniformDist_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElUniformDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElUniformDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElUniformDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElUniformDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElUniformDistMultiVec_z(*args)
else: DataExcept()
else: TypeExcept()
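# Usage sketch, analogous to Gaussian above (hypothetical values): entries are
# drawn uniformly from a ball of the given radius around `center`:
#   A = DistMatrix(sTag)
#   Uniform(A, 200, 200, centerPre=0, radius=2)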
# Uniform Helmholtz Green's
# -------------------------
lib.ElUniformHelmholtzGreens_c.argtypes = \
lib.ElUniformHelmholtzGreensDist_c.argtypes = \
[c_void_p,iType,sType]
lib.ElUniformHelmholtzGreens_z.argtypes = \
lib.ElUniformHelmholtzGreensDist_z.argtypes = \
[c_void_p,iType,dType]
def UniformHelmholtzGreens(A,n,lamb):
args = [A.obj,n,lamb]
if type(A) is Matrix:
if A.tag == cTag: lib.ElUniformHelmholtzGreens_c(*args)
elif A.tag == zTag: lib.ElUniformHelmholtzGreens_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == cTag: lib.ElUniformHelmholtzGreensDist_c(*args)
elif A.tag == zTag: lib.ElUniformHelmholtzGreensDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Wigner
# ------
lib.ElWigner_s.argtypes = \
lib.ElWignerDist_s.argtypes = \
[c_void_p,iType,sType,sType]
lib.ElWigner_d.argtypes = \
lib.ElWignerDist_d.argtypes = \
[c_void_p,iType,dType,dType]
lib.ElWigner_c.argtypes = \
lib.ElWignerDist_c.argtypes = \
[c_void_p,iType,cType,sType]
lib.ElWigner_z.argtypes = \
lib.ElWignerDist_z.argtypes = \
[c_void_p,iType,zType,dType]
def Wigner(A,n,meanPre=0,stddev=1):
mean = TagToType(A.tag)(meanPre)
args = [A.obj,n,mean,stddev]
if type(A) is Matrix:
if A.tag == sTag: lib.ElWigner_s(*args)
elif A.tag == dTag: lib.ElWigner_d(*args)
elif A.tag == cTag: lib.ElWigner_c(*args)
elif A.tag == zTag: lib.ElWigner_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == sTag: lib.ElWignerDist_s(*args)
elif A.tag == dTag: lib.ElWignerDist_d(*args)
elif A.tag == cTag: lib.ElWignerDist_c(*args)
elif A.tag == zTag: lib.ElWignerDist_z(*args)
else: DataExcept()
else: TypeExcept()
|
birm/Elemental
|
python/matrices.py
|
Python
|
bsd-3-clause
| 86,117
|
[
"Gaussian"
] |
1765a0323359313cb902d098d06171dd765796910581a9aff4231c72c08ba322
|
# Created by DrLecter, based on DraX' scripts,
# this script is part of the L2J Official Datapack Project
# Visit us at http://www.l2jdp.com/
# See readme-dp.txt and gpl.txt for license and distribution details
# Let us know if you did not receive a copy of such files.
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "dark_elven_change_1"
#print "Dark elven change 1"
#Quest items
GAZE_OF_ABYSS = 1244
IRON_HEART = 1252
JEWEL_OF_DARKNESS = 1261
ORB_OF_ABYSS = 1270
#Reward Item
SHADOW_WEAPON_COUPON_DGRADE = 8869
#XENOS,TOBIAS,TRONIX
NPCS=[30290,30297,30462]
#event:[newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item]
#low_ni : level too low, and you don't have quest item
#low_i: level too low, despite you have the item
#ok_ni: level ok, but you don't have quest item
#ok_i: level ok, you got quest item, class change takes place
CLASSES = {
"PK":[32,31,2,"15","16","17","18",GAZE_OF_ABYSS],
"AS":[35,31,2,"19","20","21","22",IRON_HEART],
"DW":[39,38,2,"23","24","25","26",JEWEL_OF_DARKNESS],
"SO":[42,38,2,"27","28","29","30",ORB_OF_ABYSS]
}
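#Example of reading one entry: CLASSES["PK"] says a Dark Elven Fighter (class 31,
#race 2) holding a GAZE_OF_ABYSS becomes a Palus Knight (class 32); page -15 is
#shown when level and item are both lacking, -16 when only the level is too low,
#-17 when only the item is missing and -18 on a successful change.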
#Messages
default = "No Quest"
def change(st,player,newclass,item) :
st.takeItems(item,1)
st.playSound("ItemSound.quest_fanfare_2")
player.setClassId(newclass)
player.setBaseClass(newclass)
player.broadcastUserInfo()
return
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAdvEvent (self,event,npc,player) :
npcId = npc.getNpcId()
htmltext = default
suffix = ''
st = player.getQuestState(qn)
if not st : return
race = player.getRace().ordinal()
classid = player.getClassId().getId()
level = player.getLevel()
if npcId not in NPCS : return
if not event in CLASSES.keys() :
return event
else :
newclass,req_class,req_race,low_ni,low_i,ok_ni,ok_i,req_item=CLASSES[event]
if race == req_race and classid == req_class :
item = st.getQuestItemsCount(req_item)
if level < 20 :
suffix = "-"+low_i+".htm"
if not item :
suffix = "-"+low_ni+".htm"
else :
if not item :
suffix = "-"+ok_ni+".htm"
else :
suffix = "-"+ok_i+".htm"
st.giveItems(SHADOW_WEAPON_COUPON_DGRADE,15)
change(st,player,newclass,req_item)
st.exitQuest(1)
htmltext = str(npcId)+suffix
return htmltext
def onTalk (self,npc,player):
st = player.getQuestState(qn)
npcId = npc.getNpcId()
race = player.getRace().ordinal()
classId = player.getClassId()
id = classId.getId()
htmltext = default
if player.isSubClassActive() :
st.exitQuest(1)
return htmltext
# Dark Elves only
if npcId in NPCS :
htmltext = str(npcId)
if race in [2] :
if classId.level() == 1 : # first occupation change already made
htmltext += "-32.htm"
elif classId.level() >= 2 : # second/third occupation change already made
htmltext += "-31.htm"
elif id == 31 : # DE Fighter
return htmltext+"-01.htm"
elif id == 38 : # DE Mystic
return htmltext+"-08.htm"
else :
htmltext += "-33.htm" # other races
st.exitQuest(1)
return htmltext
QUEST = Quest(99997,qn,"village_master")
CREATED = State('Start', QUEST)
QUEST.setInitialState(CREATED)
for npc in NPCS :
QUEST.addStartNpc(npc)
QUEST.addTalkId(npc)
|
zenn1989/scoria-interlude
|
L2Jscoria-Game/data/scripts/village_master/dark_elven_change_1/__init__.py
|
Python
|
gpl-3.0
| 3,627
|
[
"VisIt"
] |
015d45cda848397ad6fae0cef454da37059879593e29e9be0424273919211c62
|
"""Checker for E9973 missing-space-in-doctest"""
import re
from typing import Match, Optional, Union
from astroid import nodes
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker
DOCTEST = ">>>"
class MissingSpaceInDoctestChecker(BaseChecker):
__implements__ = IAstroidChecker
name = "missing_space_in_doctest"
msgs = {
"E9973": (
'Space missing after >>> in the docstring of function "%s."',
"missing-space-in-doctest",
"Used when a doctest is missing a space before the code to be executed",
)
}
# This is important so that your checker is executed before others
priority = -1
@check_messages("missing-space-in-doctest")
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""Visit a function definition"""
docstring = node.doc
if docstring is not None:
start_line = node.lineno + 1
lines = docstring.split("\n")
for line_no, line in enumerate(lines):
if self._has_invalid_doctest(line):
self.add_message(
"missing-space-in-doctest",
node=node,
args=node.name,
line=line_no + start_line,
)
# Helper Function
def _has_invalid_doctest(self, doc: str) -> Union[bool, Optional[Match[str]]]:
"""Return whether the docstring line contains an invalid doctest"""
start_index = doc.find(DOCTEST)
contains_doctest = start_index != -1
        # A bare ">>>" may be indented inside a docstring, so compare the
        # stripped line rather than requiring len(doc) == 3.
        if contains_doctest and doc.strip() == DOCTEST:
            return True  # the doctest marker isn't followed by any character
        match = re.match(r"\s*>>>\w", doc)
return match
def register(linter):
"""Required method to auto register this checker"""
linter.register_checker(MissingSpaceInDoctestChecker(linter))
|
pyta-uoft/pyta
|
python_ta/checkers/missing_space_in_doctest_checker.py
|
Python
|
gpl-3.0
| 1,983
|
[
"VisIt"
] |
cd1b1b750a077ea4dc89490c0ab8cf615892120b9445efd38702345856a879bf
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
#!/usr/bin/python
# PBS -e global_comm_err
# PBS -o global_comm_out
# PBS -N global_comm
# PBS -l mppwidth=64
# PBS -l walltime=00:02:00
# PBS -A e05-qmdev-nic
#PBS -V
def test(nbprocs, ppn):
import pylada
from pylada.process.mpi import create_global_comm
pylada.default_comm['ppn'] = ppn
pylada.default_comm['n'] = nbprocs
print('EXPECTED N={0}, PPN={1}'.format(nbprocs, ppn))
create_global_comm(nbprocs)
print('FOUND')
for u in pylada.default_comm.items():
print(u[0], u[1])
print('MACHINES')
for u in pylada.default_comm.machines.items():
print(u[0], u[1])
if __name__ == '__main__':
test(nbprocs=64, ppn=32)
|
pylada/pylada-light
|
tests/process/globalcomm.py
|
Python
|
gpl-3.0
| 1,830
|
[
"CRYSTAL",
"VASP"
] |
8e4988aa6f3f17224d0158f57b1445184e9d1fd181c3b7c74a581eac1874b379
|
# encoding: utf-8
"""
Global exception classes for IPython.core.
Authors:
* Brian Granger
* Fernando Perez
* Min Ragan-Kelley
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Exception classes
#-----------------------------------------------------------------------------
class IPythonCoreError(Exception):
pass
class TryNext(IPythonCoreError):
"""Try next hook exception.
Raise this in your hook function to indicate that the next hook handler
should be used to handle the operation.
"""
class UsageError(IPythonCoreError):
"""Error in magic function arguments, etc.
Something that probably won't warrant a full traceback, but should
nevertheless interrupt a macro / batch file.
"""
class StdinNotImplementedError(IPythonCoreError, NotImplementedError):
"""raw_input was requested in a context where it is not supported
For use in IPython kernels, where only some frontends may support
stdin requests.
"""
|
cloud9ers/gurumate
|
environment/lib/python2.7/site-packages/IPython/core/error.py
|
Python
|
lgpl-3.0
| 1,540
|
[
"Brian"
] |
9c17665f4a8ab779925922de238ea8ae5eda1e7ae7c70adb72772ad09d064660
|
# coding=utf-8
#------------------------------------------------------------------------------
# Name: test_mapper_opendap.py
# Purpose: Test the Opendap class
#
# Author: Artem Moiseev
#
# Created: 15.08.2018
# Copyright: (c) NERSC
# Licence: This file is part of NANSAT. You can redistribute it or modify
# under the terms of GNU General Public License, v.3
# http://www.gnu.org/licenses/gpl-3.0.html
#------------------------------------------------------------------------------
import sys
import unittest
from netCDF4 import Dataset
import tempfile
from mock import patch
from nansat.exceptions import WrongMapperError
from collections import OrderedDict
from nansat.mappers.opendap import Opendap
import numpy as np
import warnings
class OpenDAPTests(unittest.TestCase):
def setUp(self):
_, tmp_filename = tempfile.mkstemp(suffix='.nc')
ds = Dataset(tmp_filename, 'w')
lat_sz = 30
lon_sz = 20
values = np.random.random_sample((lat_sz, lon_sz))
# Set dimensions
ds.createDimension('lat', lat_sz)
ds.createDimension('lon', lon_sz)
ds.createDimension('time', 3)
ds.createDimension('depth', 10)
# Set variables
# 1d "dimensional" variables i.e lats, times, etc.
times = ds.createVariable('var2', 'i4', ('time'))
lats = ds.createVariable('lat', 'i4', ('lat'))
lats[:] = np.linspace(0, 60, lat_sz)
lons = ds.createVariable('lon', 'i4', ('lon'))
lons[:] = np.linspace(0, 20, lon_sz)
# Spatial variables 2d, 3d, and 4d
ds.createVariable('var2d', 'i4', ('lat', 'lon'))
ds.createVariable('var3d', 'i4', ('time', 'lat', 'lon'))
ds.createVariable('var4d', 'f4', ('time', 'depth', 'lat', 'lon'))
# Initiate the Opendap object
self.od = Opendap()
self.od.baseURLs = ['http://first.no', 'http://second.com']
self.od.xName = 'lon'
self.od.yName = 'lat'
self.od.timeVarName = 'time'
self.od.ds = ds
self.ds = ds
def test_test_mapper(self):
res_ok = self.od.test_mapper(filename='http://first.no/path/to/the/file.nc')
self.assertIsNone(res_ok)
with self.assertRaises(WrongMapperError):
self.od.test_mapper(filename='http://not-in-base-urls.net/path/to/the/file.nc')
""" Not sure how to test get_dataset..
    According to the following, mocking C modules seems not to be possible. Can
    that be the reason the code below doesn't work?
See https://stackoverflow.com/questions/192649/can-you-monkey-patch-methods-on-core-types-in-python/192857#192857
"""
##@patch('nansat.mappers.opendap.Dataset.__init__')
#@patch.object('nansat.mappers.opendap.Dataset', '__init__', return_value = None)
#def test_get_dataset(self, mock_dataset):
# dd = Dataset()
# self.od.filename = 'http://something.that.is/mocked'
# ds1 = self.od.get_dataset(None)
# self.assertIsInstance(ds1, Dataset)
def test_get_dataset_which_does_not_exist(self):
wrong_filename = '/path/which/does/not/exist/file.nc'
self.od.filename = wrong_filename
if sys.version_info.major==2:
with self.assertRaises(IOError) as ve:
self.od.get_dataset(None)
else:
with self.assertRaises(FileNotFoundError) as ve:
self.od.get_dataset(None)
self.assertEqual(ve.exception.errno, 2)
with self.assertRaises(ValueError) as ve:
self.od.get_dataset([])
self.assertEqual(ve.exception.args[0], 'Input ds is not netCDF.Dataset!')
def test_get_geospatial_variable_names(self):
ds_vars = self.od.get_geospatial_variable_names()
self.assertEqual(len(ds_vars), 3)
self.assertIn('var2d', ds_vars)
self.assertIn('var3d', ds_vars)
self.assertIn('var4d', ds_vars)
def test_get_dataset_time(self):
pass
def test_get_layer_datetime(self):
date1 = '2010-01-02'
datetimes_1 = np.arange(np.datetime64('2010-01-01'), np.datetime64('2010-01-02'))
res_layer_num1, res_layer_date1 = Opendap.get_layer_datetime(date1, datetimes_1)
self.assertEqual(res_layer_num1, 0)
self.assertIsInstance(res_layer_date1, np.datetime64)
datetimes_2 = np.arange(np.datetime64('2009-01-01'), np.datetime64('2009-01-08'))
with self.assertRaises(ValueError):
Opendap.get_layer_datetime(date1, datetimes_2)
datetimes_3 = np.arange(np.datetime64('2009-12-31'), np.datetime64('2010-01-08'))
res_layer_num3, res_layer_date3 = Opendap.get_layer_datetime(date1, datetimes_3)
self.assertEqual(res_layer_date3, np.datetime64(date1))
self.assertEqual(res_layer_num3, 2)
@patch('nansat.mappers.opendap.gdal.Open')
def test_metaitem(self, mock_open):
mock_open.return_value = None
self.od.ds.variables['var3d'].setncattr('test_attr', 'test_val')
res1 = self.od.get_metaitem('https://file-url.nc', 'var3d', (0, 'y', 'x'))
self.assertIsInstance(res1, dict)
self.assertEqual(len(res1.keys()), 2)
self.assertIn('dst', res1.keys())
self.assertIn('src', res1.keys())
self.assertIn('SourceFilename', res1['src'].keys())
self.assertIn('SourceBand', res1['src'].keys())
self.assertIn('name', res1['dst'].keys())
self.assertIn('dataType', res1['dst'].keys())
self.assertIn('test_attr', res1['dst'].keys())
self.od.ds.variables['var3d'].delncattr('test_attr')
@patch('nansat.mappers.opendap.gdal.Open')
def test_get_metaitem_spec_attrs(self, mock_open):
mock_open.return_value = None
test_cases = [
{'key': 'offset', 'val': 'offset_value', 'meta_key': 'ScaleOffset'},
{'key': 'add_offset', 'val': 'add_offset_value', 'meta_key': 'ScaleOffset'},
{'key': 'scale', 'val': 'scale_value', 'meta_key': 'ScaleRatio'},
{'key': 'scale_factor', 'val': 'scale_factor_value', 'meta_key': 'ScaleRatio'},
]
for test_case in test_cases:
self.od.ds.variables['var3d'].setncattr(test_case['key'], test_case['val'])
res1 = self.od.get_metaitem('https://file-url.nc', 'var3d', (0, 'y', 'x'))
self.assertIn((test_case['meta_key'], test_case['val']), res1['src'].items())
self.od.ds.variables['var3d'].delncattr(test_case['key'])
def test_fix_encoding(self):
self.assertEqual(Opendap._fix_encoding(u'åsnes'), 'snes')
self.assertEqual(Opendap._fix_encoding('asnes'), 'asnes')
def test_filter_dimensions(self):
res1 = list(filter(self.od._filter_dimensions, ['time', 'var1', 'lat', 'lon']))
self.assertIsInstance(res1, list)
self.assertEqual(len(res1), 1)
self.assertEqual(res1[0], 'var1')
res2 = list(filter(self.od._filter_dimensions, ['time', 'lat', 'lon']))
self.assertEqual(len(res2), 0)
@patch('nansat.mappers.opendap.gdal.Open')
def test_create_metadict(self, mock_open):
mock_open.return_value = None
res1 = self.od.create_metadict('test.nc', ['var4d'], 0)
self.assertIsInstance(res1, list)
self.assertEqual(len(res1), 10)
for i in range(len(res1)):
source_filename = res1[i]['src']['SourceFilename'][19:31]
self.assertEqual(source_filename, '[0][%s][y][x]' % i)
res2 = self.od.create_metadict('test.nc', ['var2d', 'var3d'], 0)
self.assertEqual(len(res2), 2)
self.assertEqual(res2[0]['src']['SourceFilename'][19:25], '[y][x]')
self.assertEqual(res2[1]['src']['SourceFilename'][19:28], '[0][y][x]')
def test_get_time_coverage_resolution(self):
res1 = self.od.get_time_coverage_resolution()
self.assertIsInstance(res1, int)
self.assertEqual(res1, 0)
self.od.ds.setncattr('time_coverage_resolution', 'P2D')
res2 = self.od.get_time_coverage_resolution()
self.assertEqual(res2, 172800)
self.od.ds.setncattr('time_coverage_resolution', 'wrong_value')
with warnings.catch_warnings():
_ = self.od.get_time_coverage_resolution()
self.od.ds.delncattr('time_coverage_resolution')
def test_get_shape(self):
res = self.od.get_shape()
self.assertIsInstance(res, tuple)
self.assertEqual(len(res), 2)
self.assertEqual(res[0], 20)
self.assertEqual(res[1], 30)
    def test_get_geotransform(self):
res = self.od.get_geotransform()
self.assertIsInstance(res, tuple)
self.assertEqual(len(res), 6)
self.assertEqual(res, (0, 1, 0, 0, 0, 2))
|
nansencenter/nansat
|
nansat/tests/mappers/test_mapper_opendap.py
|
Python
|
gpl-3.0
| 8,767
|
[
"NetCDF"
] |
a44a8092bec453e713abf14a14371b7933b99eab5c3332756d095c1ed6c0b48a
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from SciPy import SciPy
from Matrix import Matrix, COOMatrix, SparseMatrix, CSRMatrix, DenseMatrix  # DenseMatrix added: used in compute() below and presumably defined alongside the other matrix classes
from scipy import sparse
import numpy, scipy
class MatrixConvert(SciPy):
def compute(self):
m = self.get_input("InputMatrix")
to = self.get_input("OutputType")
to = to.upper()
        if to == 'DENSE':  # 'to' was upper-cased above, so compare against upper case
self.matrix = DenseMatrix(m.matrix.todense())
self.set_output("SparseOutput", self.matrix)
else:
self.matrix = SparseMatrix(m.matrix.tocsc())
self.set_output("SparseOutput", self.matrix)
class vtkDataSetToMatrix(SciPy):
    ''' In some cases, particularly in terms of user-defined VTK Filters, the
    output of the filter is a vtk datatype representing the resulting data set,
    which must be converted to a matrix. '''
def from_unstructured_grid(self, vtkalgout):
import vtk
prod = vtkalgout.vtkInstance.GetProducer()
prod.Update()
grid = prod.GetOutput()
pt_set = grid.GetPoints()
scalars = grid.GetPointData().GetScalars()
''' Points in vtk are always 3D... so we must assume this. '''
self.matrix_ = SparseMatrix()
self.matrix_.matrix = sparse.csc_matrix((grid.GetNumberOfPoints(), 3))
i = 0
while i < grid.GetNumberOfPoints():
(x,y,z) = pt_set.GetPoint(i)
self.matrix_.matrix[i,0] = x
self.matrix_.matrix[i,1] = y
self.matrix_.matrix[i,2] = z
print x, y, z
i += 1
def compute(self):
if self.has_input("vtkUnstructuredGrid"):
self.from_unstructured_grid(self.get_input("vtkUnstructuredGrid"))
else:
pass
self.set_output("Output Matrix", self.matrix_)
class PhaseHistogramToVTKPoints(SciPy):
def form_point_set(self, histo, point_set):
(slices, numbins) = histo.shape
phases = numpy.arange(numbins)
phases = phases * (360. / numbins)
phases += phases[1] / 2.
phi_step = phases[0]
for time in xrange(slices):
z = float(time)
for bin in xrange(numbins):
r = histo[time,bin]
theta = phi_step * (bin+1)
theta *= (scipy.pi / 180.)
x = r*scipy.cos(theta)
y = r*scipy.sin(theta)
point_set.InsertNextPoint(x, y, z)
for bin in xrange(numbins):
curbin = bin
lastbin = bin-1
if lastbin < 0:
lastbin = numbins-1
r = (histo[time,bin] - histo[time,lastbin]) / 2.
theta = curbin * 360. / numbins
x = r*scipy.cos(theta)
y = r*scipy.sin(theta)
point_set.InsertNextPoint(x, y, z)
def compute(self):
import vtk
phasors = self.get_input("FFT Input")
numbins = self.get_input("Num Bins")
phasor_matrix = phasors.matrix.toarray()
(timeslices,phases) = phasor_matrix.shape
point_set = vtk.vtkPoints()
histo = numpy.zeros((timeslices, numbins))
for time in xrange(timeslices):
phase_slice = phasor_matrix[time,:]
reals = phase_slice.real
imaginary = phase_slice.imag
phases = scipy.arctan2(imaginary, reals)
phases = phases * (180. / scipy.pi)
bins = phases % numbins
for b in bins:
histo[time,b] += 1
self.form_point_set(histo, point_set)
pointdata = vtk.vtkUnstructuredGrid()
pointdata.SetPoints(point_set)
self.surf_filter = vtk.vtkSurfaceReconstructionFilter()
self.surf_filter.SetInput(0,pointdata)
# self.surf_filter.Update()
reg = core.modules.module_registry
vtk_set = reg.registry.get_descriptor_by_name('edu.utah.sci.vistrails.vtk', 'vtkAlgorithmOutput').module()
vtk_set.vtkInstance = self.surf_filter.GetOutputPort()
histo_mat = SparseMatrix()
histo_mat.matrix = sparse.csc_matrix(histo)
self.set_output("Num Slices", timeslices)
self.set_output("Phase Histogram", histo_mat)
self.set_output("Phase Geometry", vtk_set)
|
VisTrails/VisTrails
|
contrib/SciPy/MatrixConvert.py
|
Python
|
bsd-3-clause
| 5,350
|
[
"VTK"
] |
0c7aa0eaf20f6e9557c2c50c4e1236759ca3f273639f6ef6fe2a0fc17d4dc82e
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hasnotebase import HasNoteBase
#-------------------------------------------------------------------------
# "Events having notes"
#-------------------------------------------------------------------------
class HasNote(HasNoteBase):
"""Events having notes"""
name = _('Events having <count> notes')
description = _("Matches events having a certain number of notes")
|
pmghalvorsen/gramps_branch
|
gramps/gen/filters/rules/event/_hasnote.py
|
Python
|
gpl-2.0
| 1,754
|
[
"Brian"
] |
da44df409f100a960affd664ce9e252e4bff5ad18c791fd61e3df2f048038801
|
#
# PyEarthScience: read_netCDF_with_PyNIO.py
#
# Description:
# Demonstrate the use of PyNIO to open and read the content of
# a netCDF file.
#
# Author:
# Karin Meier-Fleischer
#
# Date of initial publication:
# August, 2019
#
'''
PyEarthScience: read_netCDF_with_PyNIO.py

Description:
    Demonstrate the use of PyNIO to open and read the content of
    a netCDF file.

Requirements:
    - PyNIO
    - PyNGL
    - numpy
    - netCDF
'''
import os
import numpy as np
import Ngl, Nio
#-----------------------------------------------------------------------
#-- Function: getVariableNames() - return the variable names (without coordinates)
#-----------------------------------------------------------------------
def getVariableNames(file):
dims = file.dimensions
coords = list(dims.keys())
names = list(file.variables.keys())
vnames = [n for n in names if n not in coords]
return(vnames, coords)
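#-- Example (sketch): for the NUG file opened in main() this yields something
#-- like vnames = ['t', ...] and coords = ['time', 'lev', 'lat', 'lon']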
#-----------------------------------------------------------------------
#-- Function: main
#-----------------------------------------------------------------------
def main():
#-- input file rectilinear_grid_3d.nc from the NCL User Guide
#-- is available in the PyNGL installation
home = os.environ.get('HOME')
fname = os.path.join(home,"local/miniconda2/envs/pyngl_py3/lib/python3.7/site-packages/ngl/ncarg/data/nug/rectilinear_grid_3D.nc") #-- data file name
#-- open file and print some information similar to ncdump and others
file = Nio.open_file(fname,"r")
print('------------------------------------------------------')
print()
print('--> file ', file)
print()
print('--> file attributes ', file.attributes)
print()
dims = file.dimensions
    print('--> file dimensions ', dims)  # 'dims.values' without parentheses would print the bound method
print()
print('--> file size of dimension time = ', dims['time'])
print('--> file size of dimension lat = ', dims['lat'])
print('--> file size of dimension lon = ', dims['lon'])
#-- same as above
#print('--> file size of dimension time = ', file.dimensions['time'])
#print('--> file size of dimension lat = ', file.dimensions['lat'])
#print('--> file size of dimension lon = ', file.dimensions['lon'])
print()
#-- get the variable and coordinates names (using function defined at the top of the script)
vnames, coords = getVariableNames(file)
print('--> Variable names (no coordinates): ', vnames)
print('--> Variable names for coordinates: ', coords)
print()
#-- get the attributes of variable 't'
vattr = [getattr(file.variables['t'],a) for a in file.variables['t'].attributes.keys()]
print('--> file variable attributes ', list(vattr))
print()
#-- read variable 't', first timestep, first level
var = file.variables['t'][0,0,:,:]
#-- print the size and shape of the variable
print('------------------------------------------------------')
print()
print('--> var.size ',var.shape[0] * var.shape[1])
print('--> var.shape ',var.shape)
#-- same as
#print('--> var.size ',f.dimensions['lat'] * f.dimensions['lon'])
#print('--> var.shape ',var.shape)
print()
#-- read variable latitude and longitude arrays
lat = file.variables['lat'][:]
lon = file.variables['lon'][:]
#-- print the minimum and maximum of lat and lon
print('------------------------------------------------------')
print()
print('--> lat min ', lat.min().item())
print('--> lat max ', lat.max())
print('--> lon min ', lon.min())
print('--> lon max ', lon.max())
#-- the above notation has the same results as below
#print('--> lat min ', lat.min().item())
#print('--> lat max ', lat.max().item())
#print('--> lon min ', lon.min().item())
#print('--> lon max ', lon.max().item())
print()
#-- retrieve the name of the coordinates lat/lon variables and the values of
#-- the shape of the coordinates
dimslat = coords[0]
shapelat = lat.shape[0]
dimslon = coords[1]
shapelon = lon.shape[0]
nrlat = shapelat
nrlon = shapelon
print('------------------------------------------------------')
print()
print('--> dimslat: ',dimslat, ' dimslon: ',dimslon,' nrlat: ',nrlat,' nrlon: ',nrlon)
print()
#-- print variable information
print('------------------------------------------------------')
print()
print('--> var information')
print()
print(var)
print()
##-- print the variable attributes
#print('------------------------------------------------------')
#print()
#print('--> attributes: ',var.key())
#print()
#-- print the variable values
#print('------------------------------------------------------')
#print()
#print('--> values ')
#print()
#print(var.values)
#print()
#-- print the type of the variable (DataArray)
print('------------------------------------------------------')
print()
print('--> type(var) ',type(var))
print()
#-- print the type of the variable values (numpy.ndarray)
print('------------------------------------------------------')
print()
print('--> type(var.values) ',type(var[:,:]))
print()
#-- select variable t from dataset for first timestep
print('------------------------------------------------------')
print()
print('--> dataset variable t (time=0, lev=6)')
print()
print(file.variables['t'][0,6,:,:])
print()
#-- select variable t from dataset, lat index 1 and lon index 2
print('------------------------------------------------------')
print()
print('--> dataset variable t select data which is closest to lat=1 and lon=2')
print()
print(file.variables['t'][:,:,1,2])
print()
#-- select variable t, timestep 2001-01-01
print('------------------------------------------------------')
print()
print('--> time(0) = "2001-01-01"')
print()
print(file.variables['t'][0,:,:,:])
print()
    #-- select a sub-region (slice) - note the unusual notation of the selection!
#-- The leading i tells PyNIO to use the index instead of coordinate values, e.g. time|i0
print('------------------------------------------------------')
print()
print('--> select sub-region')
print()
print(file.variables['t']['time|i0 lev|: lat|20:0 lon|-25:0'])
print()
#-- print median values of variable t of dataset, one value for each level (axis=lat,lon)
print('------------------------------------------------------')
print()
print('--> variable median')
print()
print(np.median(file.variables['t'],axis=(2,3)))
print()
#-- compute the means of the variable t of the dataset, one value for each level (axis=lat,lon)
print('------------------------------------------------------')
print()
print('--> means')
print()
means = np.mean(file.variables['t'], axis=(2,3))
print(means)
print()
#-- compute the mean of the variable t which are greater than 273.15 K
print('------------------------------------------------------')
print()
print('--> only means greater than 273.15 K')
print()
print(means[np.where(means > 273.15)])
print()
#-------------------------------------------------------------
#-- run main
#-------------------------------------------------------------
if __name__ == "__main__":
main()
|
KMFleischer/PyEarthScience
|
IO/read_netCDF_with_PyNIO.py
|
Python
|
mit
| 7,732
|
[
"NetCDF"
] |
4e25ddab667bc948b7e7ec31f2f1d391069900e565b5d1ab1569ffc8da4c0d05
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and, cint
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.stock.stock_balance import update_bin_qty, get_reserved_qty
from frappe.desk.notifications import clear_doctype_notifications
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class WarehouseRequired(frappe.ValidationError): pass
class SalesOrder(SellingController):
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_for_items()
self.validate_warehouse()
self.validate_drop_ship()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self)
self.validate_with_previous_doc()
self.set_status()
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_mandatory(self):
# validate transaction date v/s delivery date
if self.delivery_date:
if getdate(self.transaction_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))
def validate_po(self):
# validate p.o date v/s delivery date
if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0] and not \
cint(frappe.db.get_single_value("Selling Settings", "allow_against_multiple_purchase_orders")):
frappe.msgprint(_("Warning: Sales Order {0} already exists against Customer's Purchase Order {1}").format(so[0][0], self.po_no))
def validate_for_items(self):
check_list = []
for d in self.get('items'):
check_list.append(cstr(d.item_code))
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
# check for same entry multiple times
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list) and \
not cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")):
frappe.msgprint(_("Warning: Same item has been entered multiple times."))
def product_bundle_has_stock_item(self, product_bundle):
"""Returns true if product bundle has stock item"""
ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi
where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle))
return ret
def validate_sales_mntc_quotation(self):
for d in self.get('items'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales' and not self.delivery_date:
frappe.throw(_("Please enter 'Expected Delivery Date'"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project_name and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project_name, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
def validate_warehouse(self):
super(SalesOrder, self).validate_warehouse()
for d in self.get("items"):
if (frappe.db.get_value("Item", d.item_code, "is_stock_item")==1 or
(self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \
and not d.warehouse and not cint(d.delivered_by_supplier):
frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code),
WarehouseRequired)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc({
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
doc.update_opportunity()
def validate_drop_ship(self):
for d in self.get('items'):
if d.delivered_by_supplier and not d.supplier:
frappe.throw(_("Row #{0}: Set Supplier for item {1}").format(d.idx, d.item_code))
def on_submit(self):
super(SalesOrder, self).on_submit()
self.check_credit_limit()
self.update_reserved_qty()
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.base_grand_total, self)
self.update_prevdoc_status('submit')
def on_cancel(self):
# Cannot cancel stopped SO
if self.status == 'Stopped':
frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))
self.check_nextdoc_docstatus()
self.update_reserved_qty()
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def check_credit_limit(self):
from erpnext.selling.doctype.customer.customer import check_credit_limit
check_credit_limit(self.customer, self.company)
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def update_status(self, status):
self.check_modified_date()
self.set_status(update=True, status=status)
self.update_reserved_qty()
self.notify_update()
clear_doctype_notifications(self)
def update_reserved_qty(self, so_item_rows=None):
"""update requested qty (before ordered_qty is updated)"""
item_wh_list = []
def _valid_for_reserve(item_code, warehouse):
if item_code and warehouse and [item_code, warehouse] not in item_wh_list \
and frappe.db.get_value("Item", item_code, "is_stock_item"):
item_wh_list.append([item_code, warehouse])
for d in self.get("items"):
if (not so_item_rows or d.name in so_item_rows) and not d.delivered_by_supplier:
if self.has_product_bundle(d.item_code):
for p in self.get("packed_items"):
if p.parent_detail_docname == d.name and p.parent_item == d.item_code:
_valid_for_reserve(p.item_code, p.warehouse)
else:
_valid_for_reserve(d.item_code, d.warehouse)
for item_code, warehouse in item_wh_list:
update_bin_qty(item_code, warehouse, {
"reserved_qty": get_reserved_qty(item_code, warehouse)
})
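# Note: update_bin_qty recomputes reserved_qty once per unique
# (item_code, warehouse) pair collected above; for product bundles the
# packed items are reserved instead of the bundle parent item.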
def on_update(self):
pass
def before_update_after_submit(self):
self.validate_drop_ship()
self.validate_supplier_after_submit()
def validate_supplier_after_submit(self):
"""Check that supplier is the same after submit if PO is already made"""
exc_list = []
for item in self.items:
if item.supplier:
supplier = frappe.db.get_value("Sales Order Item", {"parent": self.name, "item_code": item.item_code},
"supplier")
if item.ordered_qty > 0.0 and item.supplier != supplier:
exc_list.append(_("Row #{0}: Not allowed to change Supplier as Purchase Order already exists").format(item.idx))
if exc_list:
frappe.throw('\n'.join(exc_list))
def update_delivery_status(self):
"""Update delivery status from Purchase Order for drop shipping"""
tot_qty, delivered_qty = 0.0, 0.0
for item in self.items:
if item.delivered_by_supplier:
item_delivered_qty = frappe.db.sql("""select sum(qty)
from `tabPurchase Order Item` poi, `tabPurchase Order` po
where poi.prevdoc_detail_docname = %s
and poi.prevdoc_doctype = 'Sales Order'
and poi.item_code = %s
and poi.parent = po.name
and po.docstatus = 1
and po.status = 'Delivered'""", (item.name, item.item_code))
item_delivered_qty = item_delivered_qty[0][0] if item_delivered_qty else 0
item.db_set("delivered_qty", flt(item_delivered_qty), update_modified=False)
delivered_qty += item.delivered_qty
tot_qty += item.qty
frappe.db.set_value("Sales Order", self.name, "per_delivered", flt(delivered_qty/tot_qty) * 100,
update_modified=False)
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["title"] = _("My Orders")
return list_context
@frappe.whitelist()
def stop_or_unstop_sales_orders(names, status):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
names = json.loads(names)
for name in names:
so = frappe.get_doc("Sales Order", name)
if so.docstatus == 1:
if status in ("Stopped", "Closed"):
if so.status not in ("Stopped", "Cancelled", "Closed") and (so.per_delivered < 100 or so.per_billed < 100):
so.update_status(status)
else:
if so.status in ("Stopped", "Closed"):
so.update_status('Draft')
frappe.local.message_log = []
def before_recurring(self):
super(SalesOrder, self).before_recurring()
for field in ("delivery_status", "per_delivered", "billing_status", "per_billed"):
self.set(field, None)
for d in self.get("items"):
for field in ("delivered_qty", "billed_amt", "planned_qty", "prevdoc_docname"):
d.set(field, None)
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
so = frappe.get_doc("Sales Order", source_name)
item_table = "Packed Item" if so.packed_items else "Sales Order Item"
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
item_table: {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order_no",
"stock_uom": "uom"
}
}
}, target_doc, postprocess)
return doc
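# Hypothetical server-side usage sketch (the document name below is
# illustrative, not taken from this module); in the UI these whitelisted
# mappers are normally invoked through the "Make" menu:
#
# mr = make_material_request("SO-00042") # map a submitted Sales Order
# mr.insert() # save the mapped Material Request as a draft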
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
if source.po_no:
if target.po_no:
target_po_no = target.po_no.split(", ")
target_po_no.append(source.po_no)
target.po_no = ", ".join(list(set(target_po_no))) if len(target_po_no) > 1 else target_po_no[0]
else:
target.po_no = source.po_no
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "so_detail",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: abs(doc.delivered_qty) < abs(doc.qty) and doc.delivered_by_supplier!=1
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
def postprocess(source, target):
set_missing_values(source, target)
# Get the advance paid Journal Entries in Sales Invoice Advance
target.get_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.qty and (doc.base_amount==0 or abs(doc.billed_amt) < abs(doc.amount))
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess)
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "prevdoc_docname"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def get_events(start, end, filters=None):
"""Returns events for Gantt / Calendar view rendering.
:param start: Start date-time.
:param end: End date-time.
:param filters: Filters (JSON).
"""
from frappe.desk.calendar import get_event_conditions
conditions = get_event_conditions("Sales Order", filters)
data = frappe.db.sql("""select name, customer_name, delivery_status, billing_status, delivery_date
from `tabSales Order`
where (ifnull(delivery_date, '0000-00-00')!= '0000-00-00') \
and (delivery_date between %(start)s and %(end)s)
and docstatus < 2
{conditions}
""".format(conditions=conditions), {
"start": start,
"end": end
}, as_dict=True, update={"allDay": 0})
return data
@frappe.whitelist()
def make_purchase_order_for_drop_shipment(source_name, for_supplier, target_doc=None):
def set_missing_values(source, target):
target.supplier = for_supplier
default_price_list = frappe.get_value("Supplier", for_supplier, "default_price_list")
if default_price_list:
target.buying_price_list = default_price_list
if any(item.delivered_by_supplier == 1 for item in source.items):
if source.shipping_address_name:
target.customer_address = source.shipping_address_name
target.customer_address_display = source.shipping_address
else:
target.customer_address = source.customer_address
target.customer_address_display = source.address_display
target.customer_contact_person = source.contact_person
target.customer_contact_display = source.contact_display
target.customer_contact_mobile = source.contact_mobile
target.customer_contact_email = source.contact_email
else:
target.customer = ""
target.customer_name = ""
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.schedule_date = source_parent.delivery_date
target.qty = flt(source.qty) - flt(source.ordered_qty)
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Purchase Order",
"field_no_map": [
"address_display",
"contact_display",
"contact_mobile",
"contact_email",
"contact_person"
],
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Purchase Order Item",
"field_map": [
["name", "prevdoc_detail_docname"],
["parent", "prevdoc_docname"],
["parenttype", "prevdoc_doctype"],
["uom", "stock_uom"],
["delivery_date", "schedule_date"]
],
"field_no_map": [
"rate",
"price_list_rate"
],
"postprocess": update_item,
"condition": lambda doc: doc.ordered_qty < doc.qty and doc.supplier == for_supplier
}
}, target_doc, set_missing_values)
return doclist
@frappe.whitelist()
def get_supplier(doctype, txt, searchfield, start, page_len, filters):
supp_master_name = frappe.defaults.get_user_default("supp_master_name")
if supp_master_name == "Supplier Name":
fields = ["name", "supplier_type"]
else:
fields = ["name", "supplier_name", "supplier_type"]
fields = ", ".join(fields)
return frappe.db.sql("""select {field} from `tabSupplier`
where docstatus < 2
and ({key} like %(txt)s
or supplier_name like %(txt)s)
and name in (select supplier from `tabSales Order Item` where parent = %(parent)s)
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),
name, supplier_name
limit %(start)s, %(page_len)s """.format(**{
'field': fields,
'key': frappe.db.escape(searchfield)
}), {
'txt': "%%%s%%" % txt,
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'parent': filters.get('parent')
})
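# Note on the ordering above: locate() returns the 1-based position of the
# search text within the field (0 if absent), so suppliers whose name or
# supplier_name matches earlier in the string sort first, while 99999
# pushes non-matching rows to the end.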
@frappe.whitelist()
def update_status(status, name):
so = frappe.get_doc("Sales Order", name)
so.update_status(status)
|
mbauskar/helpdesk-erpnext
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
agpl-3.0
| 21,507
|
[
"VisIt"
] |
db1c290b400d807e6703ee3167ca9015148b4e8775db73c095cedea3529081b4
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2017 Stanford University and the Authors
#
# Authors: Robert McGibbon
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import os
import numpy as np
import pytest
import mdtraj as md
from mdtraj.formats import HDF5TrajectoryFile, NetCDFTrajectoryFile
from mdtraj.reporters import HDF5Reporter, NetCDFReporter, DCDReporter
from mdtraj.testing import eq
try:
from simtk.unit import nanometers, kelvin, picoseconds, femtoseconds
from simtk.openmm import LangevinIntegrator, Platform
from simtk.openmm.app import PDBFile, ForceField, Simulation, CutoffNonPeriodic, CutoffPeriodic, HBonds
HAVE_OPENMM = True
except ImportError:
HAVE_OPENMM = False
# special pytest global to mark all tests in this module
pytestmark = pytest.mark.skipif(not HAVE_OPENMM, reason='test_reporter.py needs OpenMM.')
def test_reporter(tmpdir, get_fn):
pdb = PDBFile(get_fn('native.pdb'))
forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
# NO PERIODIC BOUNDARY CONDITIONS
system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffNonPeriodic,
nonbondedCutoff=1.0 * nanometers, constraints=HBonds, rigidWater=True)
integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = Platform.getPlatformByName('Reference')
simulation = Simulation(pdb.topology, system, integrator, platform)
simulation.context.setPositions(pdb.positions)
simulation.context.setVelocitiesToTemperature(300 * kelvin)
tmpdir = str(tmpdir)
hdf5file = os.path.join(tmpdir, 'traj.h5')
ncfile = os.path.join(tmpdir, 'traj.nc')
dcdfile = os.path.join(tmpdir, 'traj.dcd')
reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
velocities=True)
reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True, cell=True)
reporter3 = DCDReporter(dcdfile, 2)
simulation.reporters.append(reporter)
simulation.reporters.append(reporter2)
simulation.reporters.append(reporter3)
simulation.step(100)
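# 100 steps with a reporting interval of 2 gives 50 reported frames; the
# 2 fs (0.002 ps) timestep makes the frame times 0.004, 0.008, ..., 0.200 ps,
# which the time checks below verify.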
reporter.close()
reporter2.close()
reporter3.close()
with HDF5TrajectoryFile(hdf5file) as f:
got = f.read()
eq(got.temperature.shape, (50,))
eq(got.potentialEnergy.shape, (50,))
eq(got.kineticEnergy.shape, (50,))
eq(got.coordinates.shape, (50, 22, 3))
eq(got.velocities.shape, (50, 22, 3))
eq(got.cell_lengths, None)
eq(got.cell_angles, None)
eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
assert f.topology == md.load(get_fn('native.pdb')).top
with NetCDFTrajectoryFile(ncfile) as f:
xyz, time, cell_lengths, cell_angles = f.read()
eq(cell_lengths, None)
eq(cell_angles, None)
eq(time, 0.002 * 2 * (1 + np.arange(50)))
hdf5_traj = md.load(hdf5file)
dcd_traj = md.load(dcdfile, top=get_fn('native.pdb'))
netcdf_traj = md.load(ncfile, top=get_fn('native.pdb'))
# we don't have to convert units here, because md.load already
# handles that
assert hdf5_traj.unitcell_vectors is None
eq(hdf5_traj.xyz, netcdf_traj.xyz)
eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
eq(hdf5_traj.time, netcdf_traj.time)
eq(dcd_traj.xyz, hdf5_traj.xyz)
def test_reporter_subset(tmpdir, get_fn):
pdb = PDBFile(get_fn('native2.pdb'))
pdb.topology.setUnitCellDimensions([2, 2, 2])
forcefield = ForceField('amber99sbildn.xml', 'amber99_obc.xml')
system = forcefield.createSystem(pdb.topology, nonbondedMethod=CutoffPeriodic,
nonbondedCutoff=1 * nanometers, constraints=HBonds, rigidWater=True)
integrator = LangevinIntegrator(300 * kelvin, 1.0 / picoseconds, 2.0 * femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = Platform.getPlatformByName('Reference')
simulation = Simulation(pdb.topology, system, integrator, platform)
simulation.context.setPositions(pdb.positions)
simulation.context.setVelocitiesToTemperature(300 * kelvin)
tmpdir = str(tmpdir)
hdf5file = os.path.join(tmpdir, 'traj.h5')
ncfile = os.path.join(tmpdir, 'traj.nc')
dcdfile = os.path.join(tmpdir, 'traj.dcd')
atomSubset = [0, 1, 2, 4, 5]
reporter = HDF5Reporter(hdf5file, 2, coordinates=True, time=True,
cell=True, potentialEnergy=True, kineticEnergy=True, temperature=True,
velocities=True, atomSubset=atomSubset)
reporter2 = NetCDFReporter(ncfile, 2, coordinates=True, time=True,
cell=True, atomSubset=atomSubset)
reporter3 = DCDReporter(dcdfile, 2, atomSubset=atomSubset)
simulation.reporters.append(reporter)
simulation.reporters.append(reporter2)
simulation.reporters.append(reporter3)
simulation.step(100)
reporter.close()
reporter2.close()
reporter3.close()
t = md.load(get_fn('native.pdb'))
t.restrict_atoms(atomSubset)
with HDF5TrajectoryFile(hdf5file) as f:
got = f.read()
eq(got.temperature.shape, (50,))
eq(got.potentialEnergy.shape, (50,))
eq(got.kineticEnergy.shape, (50,))
eq(got.coordinates.shape, (50, len(atomSubset), 3))
eq(got.velocities.shape, (50, len(atomSubset), 3))
eq(got.cell_lengths, 2 * np.ones((50, 3)))
eq(got.cell_angles, 90 * np.ones((50, 3)))
eq(got.time, 0.002 * 2 * (1 + np.arange(50)))
assert f.topology == md.load(get_fn('native.pdb'), atom_indices=atomSubset).topology
with NetCDFTrajectoryFile(ncfile) as f:
xyz, time, cell_lengths, cell_angles = f.read()
eq(cell_lengths, 20 * np.ones((50, 3)))
eq(cell_angles, 90 * np.ones((50, 3)))
eq(time, 0.002 * 2 * (1 + np.arange(50)))
eq(xyz.shape, (50, len(atomSubset), 3))
hdf5_traj = md.load(hdf5file)
dcd_traj = md.load(dcdfile, top=hdf5_traj)
netcdf_traj = md.load(ncfile, top=hdf5_traj)
# we don't have to convert units here, because md.load already handles that
eq(hdf5_traj.xyz, netcdf_traj.xyz)
eq(hdf5_traj.unitcell_vectors, netcdf_traj.unitcell_vectors)
eq(hdf5_traj.time, netcdf_traj.time)
eq(dcd_traj.xyz, hdf5_traj.xyz)
eq(dcd_traj.unitcell_vectors, hdf5_traj.unitcell_vectors)
|
leeping/mdtraj
|
tests/test_reporter.py
|
Python
|
lgpl-2.1
| 7,439
|
[
"MDTraj",
"OpenMM"
] |
633eafe8843b95bfa8c4355eb28f2262dc29f8f815afa42cd83803039e0beb13
|
# Copyright 2008, 2009 CAMd
# (see accompanying license files for details).
"""Definition of the Atoms class.
This module defines the central object in the ASE package: the Atoms
object.
"""
import warnings
from math import cos, sin
import numpy as np
from ase.atom import Atom
from ase.data import atomic_numbers, chemical_symbols, atomic_masses
import ase.units as units
class Atoms(object):
"""Atoms object.
The Atoms object can represent an isolated molecule, or a
periodically repeated structure. It has a unit cell and
there may be periodic boundary conditions along any of the three
unit cell axes.
Information about the atoms (atomic numbers and position) is
stored in ndarrays. Optionally, there can be information about
tags, momenta, masses, magnetic moments and charges.
In order to calculate energies, forces and stresses, a calculator
object has to be attached to the atoms object.
Parameters:
symbols: str (formula) or list of str
Can be a string formula, a list of symbols or a list of
Atom objects. Examples: 'H2O', 'COPt12', ['H', 'H', 'O'],
[Atom('Ne', (x, y, z)), ...].
positions: list of xyz-positions
Atomic positions. Anything that can be converted to an
ndarray of shape (n, 3) will do: [(x1,y1,z1), (x2,y2,z2),
...].
scaled_positions: list of scaled-positions
Like positions, but given in units of the unit cell.
Can not be set at the same time as positions.
numbers: list of int
Atomic numbers (use only one of symbols/numbers).
tags: list of int
Special purpose tags.
momenta: list of xyz-momenta
Momenta for all atoms.
masses: list of float
Atomic masses in atomic units.
magmoms: list of float or list of xyz-values
Magnetic moments. Can be either a single value for each atom
for collinear calculations or three numbers for each atom for
non-collinear calculations.
charges: list of float
Atomic charges.
cell: 3x3 matrix
Unit cell vectors. Can also be given as just three
numbers for orthorhombic cells. Default value: [1, 1, 1].
celldisp: Vector
Unit cell displacement vector. To visualize a displaced cell
around the center of mass of a system of atoms. Default value
= (0,0,0)
pbc: one or three bool
Periodic boundary conditions flags. Examples: True,
False, 0, 1, (1, 1, 0), (True, False, False). Default
value: False.
constraint: constraint object(s)
Used for applying one or more constraints during structure
optimization.
calculator: calculator object
Used to attach a calculator for calculating energies and atomic
forces.
info: dict of key-value pairs
Dictionary of key-value pairs with additional information
about the system. The following keys may be used by ase:
- spacegroup: Spacegroup instance
- unit_cell: 'conventional' | 'primitive' | int | 3 ints
- adsorbate_info:
Items in the info attribute survive copy and slicing and can
be stored to and retrieved from trajectory files given that the
key is a string, the value is picklable and, if the value is a
user-defined object, its base class is importable. One should
not make any assumptions about the existence of keys.
Examples:
These three are equivalent:
>>> d = 1.104 # N2 bondlength
>>> a = Atoms('N2', [(0, 0, 0), (0, 0, d)])
>>> a = Atoms(numbers=[7, 7], positions=[(0, 0, 0), (0, 0, d)])
>>> a = Atoms([Atom('N', (0, 0, 0)), Atom('N', (0, 0, d))])
FCC gold:
>>> a = 4.05 # Gold lattice constant
>>> b = a / 2
>>> fcc = Atoms('Au',
... cell=[(0, b, b), (b, 0, b), (b, b, 0)],
... pbc=True)
Hydrogen wire:
>>> d = 0.9 # H-H distance
>>> L = 7.0
>>> h = Atoms('H', positions=[(0, L / 2, L / 2)],
... cell=(d, L, L),
... pbc=(1, 0, 0))
"""
def __init__(self, symbols=None,
positions=None, numbers=None,
tags=None, momenta=None, masses=None,
magmoms=None, charges=None,
scaled_positions=None,
cell=None, pbc=None, celldisp=None,
constraint=None,
calculator=None,
info=None):
atoms = None
if hasattr(symbols, 'get_positions'):
atoms = symbols
symbols = None
elif (isinstance(symbols, (list, tuple)) and
len(symbols) > 0 and isinstance(symbols[0], Atom)):
# Get data from a list or tuple of Atom objects:
data = [[atom.get_raw(name) for atom in symbols]
for name in
['position', 'number', 'tag', 'momentum',
'mass', 'magmom', 'charge']]
atoms = self.__class__(None, *data)
symbols = None
if atoms is not None:
# Get data from another Atoms object:
if scaled_positions is not None:
raise NotImplementedError
if symbols is None and numbers is None:
numbers = atoms.get_atomic_numbers()
if positions is None:
positions = atoms.get_positions()
if tags is None and atoms.has('tags'):
tags = atoms.get_tags()
if momenta is None and atoms.has('momenta'):
momenta = atoms.get_momenta()
if magmoms is None and atoms.has('magmoms'):
magmoms = atoms.get_initial_magnetic_moments()
if masses is None and atoms.has('masses'):
masses = atoms.get_masses()
if charges is None and atoms.has('charges'):
charges = atoms.get_initial_charges()
if cell is None:
cell = atoms.get_cell()
if celldisp is None:
celldisp = atoms.get_celldisp()
if pbc is None:
pbc = atoms.get_pbc()
if constraint is None:
constraint = [c.copy() for c in atoms.constraints]
if calculator is None:
calculator = atoms.get_calculator()
self.arrays = {}
if symbols is None:
if numbers is None:
if positions is not None:
natoms = len(positions)
elif scaled_positions is not None:
natoms = len(scaled_positions)
else:
natoms = 0
numbers = np.zeros(natoms, int)
self.new_array('numbers', numbers, int)
else:
if numbers is not None:
raise ValueError(
'Use only one of "symbols" and "numbers".')
else:
self.new_array('numbers', symbols2numbers(symbols), int)
if cell is None:
cell = np.eye(3)
self.set_cell(cell)
if celldisp is None:
celldisp = np.zeros(shape=(3, 1))
self.set_celldisp(celldisp)
if positions is None:
if scaled_positions is None:
positions = np.zeros((len(self.arrays['numbers']), 3))
else:
positions = np.dot(scaled_positions, self._cell)
else:
if scaled_positions is not None:
raise RuntimeError('Both scaled and cartesian positions set!')
self.new_array('positions', positions, float, (3,))
self.set_constraint(constraint)
self.set_tags(default(tags, 0))
self.set_momenta(default(momenta, (0.0, 0.0, 0.0)))
self.set_masses(default(masses, None))
self.set_initial_magnetic_moments(default(magmoms, 0.0))
self.set_initial_charges(default(charges, 0.0))
if pbc is None:
pbc = False
self.set_pbc(pbc)
if info is None:
self.info = {}
else:
self.info = dict(info)
self.adsorbate_info = {}
self.set_calculator(calculator)
def set_calculator(self, calc=None):
"""Attach calculator object."""
if hasattr(calc, '_SetListOfAtoms'):
from ase.old import OldASECalculatorWrapper
calc = OldASECalculatorWrapper(calc, self)
if hasattr(calc, 'set_atoms'):
calc.set_atoms(self)
self._calc = calc
def get_calculator(self):
"""Get currently attached calculator object."""
return self._calc
def _del_calculator(self):
self._calc = None
calc = property(get_calculator, set_calculator, _del_calculator,
doc='Calculator object.')
def set_constraint(self, constraint=None):
"""Apply one or more constrains.
The *constraint* argument must be one constraint object or a
list of constraint objects."""
if constraint is None:
self._constraints = []
else:
if isinstance(constraint, (list, tuple)):
self._constraints = constraint
else:
self._constraints = [constraint]
def _get_constraints(self):
return self._constraints
def _del_constraints(self):
self._constraints = []
constraints = property(_get_constraints, set_constraint, _del_constraints,
'Constraints of the atoms.')
def set_cell(self, cell, scale_atoms=False, fix=None):
"""Set unit cell vectors.
Parameters:
cell :
Unit cell. A 3x3 matrix (the three unit cell vectors) or
just three numbers for an orthorhombic cell.
scale_atoms : bool
Fix atomic positions or move atoms with the unit cell?
Default behavior is to *not* move the atoms (scale_atoms=False).
Examples:
Two equivalent ways to define an orthorhombic cell:
>>> a.set_cell([a, b, c])
>>> a.set_cell([(a, 0, 0), (0, b, 0), (0, 0, c)])
FCC unit cell:
>>> a.set_cell([(0, b, b), (b, 0, b), (b, b, 0)])
"""
if fix is not None:
raise TypeError('Please use scale_atoms=%s' % (not fix))
cell = np.array(cell, float)
if cell.shape == (3,):
cell = np.diag(cell)
elif cell.shape != (3, 3):
raise ValueError('Cell must be length 3 sequence or '
'3x3 matrix!')
if scale_atoms:
M = np.linalg.solve(self._cell, cell)
self.arrays['positions'][:] = np.dot(self.arrays['positions'], M)
self._cell = cell
def set_celldisp(self, celldisp):
celldisp = np.array(celldisp, float)
self._celldisp = celldisp
def get_celldisp(self):
"""Get the unit cell displacement vectors ."""
return self._celldisp.copy()
def get_cell(self):
"""Get the three unit cell vectors as a 3x3 ndarray."""
return self._cell.copy()
def get_reciprocal_cell(self):
"""Get the three reciprocal lattice vectors as a 3x3 ndarray.
Note that the commonly used factor of 2 pi for Fourier
transforms is not included here."""
rec_unit_cell = np.linalg.inv(self.get_cell()).transpose()
return rec_unit_cell
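# Sanity-check sketch: with A = atoms.get_cell() and
# B = atoms.get_reciprocal_cell(), np.dot(A, B.T) is the identity matrix,
# since B = inv(A).T.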
def set_pbc(self, pbc):
"""Set periodic boundary condition flags."""
if isinstance(pbc, int):
pbc = (pbc,) * 3
self._pbc = np.array(pbc, bool)
def get_pbc(self):
"""Get periodic boundary condition flags."""
return self._pbc.copy()
def new_array(self, name, a, dtype=None, shape=None):
"""Add new array.
If *shape* is not *None*, the shape of *a* will be checked."""
if dtype is not None:
a = np.array(a, dtype)
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""Get an array.
Returns a copy unless the optional argument copy is false.
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""Update array.
If *shape* is not *None*, the shape of *a* will be checked.
If *a* is *None*, then the array is deleted."""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def has(self, name):
"""Check for existence of array.
name must be one of: 'tags', 'momenta', 'masses', 'magmoms',
'charges'."""
return name in self.arrays
def set_atomic_numbers(self, numbers):
"""Set atomic numbers."""
self.set_array('numbers', numbers, int, ())
def get_atomic_numbers(self):
"""Get integer array of atomic numbers."""
return self.arrays['numbers'].copy()
def get_chemical_symbols(self):
"""Get list of chemical symbol strings."""
return [chemical_symbols[Z] for Z in self.arrays['numbers']]
def set_chemical_symbols(self, symbols):
"""Set chemical symbols."""
self.set_array('numbers', symbols2numbers(symbols), int, ())
def get_chemical_formula(self, mode='hill'):
"""Get the chemial formula as a string based on the chemical symbols.
Parameters:
mode:
There are three different modes available:
'all': The list of chemical symbols are contracted to at string,
e.g. ['C', 'H', 'H', 'H', 'O', 'H'] becomes 'CHHHOH'.
'reduce': The same as 'all' where repeated elements are contracted
to a single symbol and a number, e.g. 'CHHHOCHHH' is reduced to
'CH3OCH3'.
'hill': The list of chemical symbols are contracted to a string
following the Hill notation (alphabetical order with C and H
first), e.g. 'CHHHOCHHH' is reduced to 'C2H6O' and 'SOOHOHO' to
'H2O4S'. This is default.
"""
if len(self) == 0:
return ''
if mode == 'reduce':
numbers = self.get_atomic_numbers()
n = len(numbers)
changes = np.concatenate(([0], np.arange(1, n)[numbers[1:] !=
numbers[:-1]]))
symbols = [chemical_symbols[e] for e in numbers[changes]]
counts = np.append(changes[1:], n) - changes
elif mode == 'hill':
numbers = self.get_atomic_numbers()
elements = np.unique(numbers)
symbols = np.array([chemical_symbols[e] for e in elements])
counts = np.array([(numbers == e).sum() for e in elements])
ind = symbols.argsort()
symbols = symbols[ind]
counts = counts[ind]
if 'H' in symbols:
i = np.arange(len(symbols))[symbols == 'H']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
if 'C' in symbols:
i = np.arange(len(symbols))[symbols == 'C']
symbols = np.insert(np.delete(symbols, i), 0, symbols[i])
counts = np.insert(np.delete(counts, i), 0, counts[i])
elif mode == 'all':
numbers = self.get_atomic_numbers()
symbols = [chemical_symbols[n] for n in numbers]
counts = [1] * len(numbers)
else:
raise ValueError("Use mode = 'all', 'reduce' or 'hill'.")
formula = ''
for s, c in zip(symbols, counts):
formula += s
if c > 1:
formula += str(c)
return formula
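# Example sketch, mirroring the docstring above:
# Atoms('CH3OCH3').get_chemical_formula() -> 'C2H6O' (Hill order)
# Atoms('CH3OCH3').get_chemical_formula(mode='reduce') -> 'CH3OCH3'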
def set_tags(self, tags):
"""Set tags for all atoms. If only one tag is supplied, it is
applied to all atoms."""
if type(tags) == int:
tags = [tags] * len(self)
self.set_array('tags', tags, int, ())
def get_tags(self):
"""Get integer array of tags."""
if 'tags' in self.arrays:
return self.arrays['tags'].copy()
else:
return np.zeros(len(self), int)
def set_momenta(self, momenta):
"""Set momenta."""
if len(self.constraints) > 0 and momenta is not None:
momenta = np.array(momenta) # modify a copy
for constraint in self.constraints:
if hasattr(constraint, 'adjust_momenta'):
constraint.adjust_momenta(self.arrays['positions'],
momenta)
self.set_array('momenta', momenta, float, (3,))
def set_velocities(self, velocities):
"""Set the momenta by specifying the velocities."""
self.set_momenta(self.get_masses()[:, np.newaxis] * velocities)
def get_momenta(self):
"""Get array of momenta."""
if 'momenta' in self.arrays:
return self.arrays['momenta'].copy()
else:
return np.zeros((len(self), 3))
def set_masses(self, masses='defaults'):
"""Set atomic masses.
The array masses should contain a list of masses. In case
the masses argument is not given, or for those elements of the
masses list that are None, standard values are set.
if masses == 'defaults':
masses = atomic_masses[self.arrays['numbers']]
elif isinstance(masses, (list, tuple)):
newmasses = []
for m, Z in zip(masses, self.arrays['numbers']):
if m is None:
newmasses.append(atomic_masses[Z])
else:
newmasses.append(m)
masses = newmasses
self.set_array('masses', masses, float, ())
def get_masses(self):
"""Get array of masses."""
if 'masses' in self.arrays:
return self.arrays['masses'].copy()
else:
return atomic_masses[self.arrays['numbers']]
def set_initial_magnetic_moments(self, magmoms=None):
"""Set the initial magnetic moments.
Use either one or three numbers for every atom (collinear
or non-collinear spins)."""
if magmoms is None:
self.set_array('magmoms', None)
else:
magmoms = np.asarray(magmoms)
self.set_array('magmoms', magmoms, float, magmoms.shape[1:])
def get_initial_magnetic_moments(self):
"""Get array of initial magnetic moments."""
if 'magmoms' in self.arrays:
return self.arrays['magmoms'].copy()
else:
return np.zeros(len(self))
def get_magnetic_moments(self):
"""Get calculated local magnetic moments."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_magnetic_moments(self)
def get_magnetic_moment(self):
"""Get calculated total magnetic moment."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_magnetic_moment(self)
def set_initial_charges(self, charges=None):
"""Set the initial charges."""
if charges is None:
self.set_array('charges', None)
else:
self.set_array('charges', charges, float, ())
def get_initial_charges(self):
"""Get array of initial charges."""
if 'charges' in self.arrays:
return self.arrays['charges'].copy()
else:
return np.zeros(len(self))
def get_charges(self):
"""Get calculated charges."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_charges(self)
def set_positions(self, newpositions):
"""Set positions, honoring any constraints."""
positions = self.arrays['positions']
if self.constraints:
newpositions = np.array(newpositions, float)
for constraint in self.constraints:
constraint.adjust_positions(positions, newpositions)
self.set_array('positions', newpositions, shape=(3,))
def get_positions(self, wrap=False):
"""Get array of positions. If wrap==True, wraps atoms back
into unit cell.
"""
if wrap:
scaled = self.get_scaled_positions()
return np.dot(scaled, self._cell)
else:
return self.arrays['positions'].copy()
def get_calculation_done(self):
"""Let the calculator calculate its thing,
using the current input.
"""
if self.calc is None:
raise RuntimeError('Atoms object has no calculator.')
self.calc.initialize(self)
self.calc.calculate(self)
def get_potential_energy(self, force_consistent=False,
apply_constraint=True):
"""Calculate potential energy.
Ask the attached calculator to calculate the potential energy and
apply constraints. Use *apply_constraint=False* to get the raw
energy.
When supported by the calculator, either the energy extrapolated
to zero Kelvin or the energy consistent with the forces (the free
energy) can be returned.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
if force_consistent:
energy = self._calc.get_potential_energy(
self, force_consistent=force_consistent)
else:
energy = self._calc.get_potential_energy(self)
if apply_constraint:
constraints = [c for c in self.constraints
if hasattr(c, 'adjust_potential_energy')]
for constraint in constraints:
energy += constraint.adjust_potential_energy(
self.arrays['positions'], energy)
return energy
def get_potential_energies(self):
"""Calculate the potential energies of all the atoms.
Only available with calculators supporting per-atom energies
(e.g. classical potentials).
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_potential_energies(self)
def get_kinetic_energy(self):
"""Get the kinetic energy."""
momenta = self.arrays.get('momenta')
if momenta is None:
return 0.0
return 0.5 * np.vdot(momenta, self.get_velocities())
def get_velocities(self):
"""Get array of velocities."""
momenta = self.arrays.get('momenta')
if momenta is None:
return None
m = self.arrays.get('masses')
if m is None:
m = atomic_masses[self.arrays['numbers']]
return momenta / m.reshape(-1, 1)
def get_total_energy(self):
"""Get the total energy - potential plus kinetic energy."""
return self.get_potential_energy() + self.get_kinetic_energy()
def get_forces(self, apply_constraint=True):
"""Calculate atomic forces.
Ask the attached calculator to calculate the forces and apply
constraints. Use *apply_constraint=False* to get the raw
forces."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
forces = self._calc.get_forces(self)
if apply_constraint:
for constraint in self.constraints:
constraint.adjust_forces(self.arrays['positions'], forces)
return forces
def get_polarizability(self):
"""Get the total energy - potential plus kinetic energy."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_polarizability(self)
def get_stress(self, voigt=True):
"""Calculate stress tensor.
Returns an array of the six independent components of the
symmetric stress tensor, in the traditional Voigt order
(xx, yy, zz, yz, xz, xy) or as a 3x3 matrix. Default is Voigt
order.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
stress = self._calc.get_stress(self)
shape = stress.shape
if shape == (3, 3):
warnings.warn('Converting 3x3 stress tensor from %s ' %
self._calc.__class__.__name__ +
'calculator to the required Voigt form.')
stress = np.array([stress[0, 0], stress[1, 1], stress[2, 2],
stress[1, 2], stress[0, 2], stress[0, 1]])
else:
assert shape == (6,)
if voigt:
return stress
else:
xx, yy, zz, yz, xz, xy = stress
return np.array([(xx, xy, xz),
(xy, yy, yz),
(xz, yz, zz)])
def get_stresses(self):
"""Calculate the stress-tensor of all the atoms.
Only available with calculators supporting per-atom energies and
stresses (e.g. classical potentials). Even for such calculators
there is a certain arbitrariness in defining per-atom stresses.
"""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_stresses(self)
def get_dipole_moment(self):
"""Calculate the electric dipole moment for the atoms object.
Only available for calculators which has a get_dipole_moment()
method."""
if self._calc is None:
raise RuntimeError('Atoms object has no calculator.')
return self._calc.get_dipole_moment(self)
def copy(self):
"""Return a copy."""
import copy
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a.copy()
atoms.constraints = copy.deepcopy(self.constraints)
atoms.adsorbate_info = copy.deepcopy(self.adsorbate_info)
return atoms
def __len__(self):
return len(self.arrays['positions'])
def get_number_of_atoms(self):
"""Returns the number of atoms.
Equivalent to len(atoms) in the standard ASE Atoms class.
"""
return len(self)
def __repr__(self):
num = self.get_atomic_numbers()
N = len(num)
if N == 0:
symbols = ''
elif N <= 60:
symbols = self.get_chemical_formula('reduce')
else:
symbols = self.get_chemical_formula('hill')
s = "%s(symbols='%s', " % (self.__class__.__name__, symbols)
for name in self.arrays:
if name == 'numbers':
continue
s += '%s=..., ' % name
if (self._cell - np.diag(self._cell.diagonal())).any():
s += 'cell=%s, ' % self._cell.tolist()
else:
s += 'cell=%s, ' % self._cell.diagonal().tolist()
s += 'pbc=%s, ' % self._pbc.tolist()
if len(self.constraints) == 1:
s += 'constraint=%s, ' % repr(self.constraints[0])
if len(self.constraints) > 1:
s += 'constraint=%s, ' % repr(self.constraints)
if self._calc is not None:
s += 'calculator=%s(...), ' % self._calc.__class__.__name__
return s[:-2] + ')'
def __add__(self, other):
atoms = self.copy()
atoms += other
return atoms
def extend(self, other):
"""Extend atoms object by appending atoms from *other*."""
if isinstance(other, Atom):
other = self.__class__([other])
n1 = len(self)
n2 = len(other)
for name, a1 in self.arrays.items():
a = np.zeros((n1 + n2,) + a1.shape[1:], a1.dtype)
a[:n1] = a1
if name == 'masses':
a2 = other.get_masses()
else:
a2 = other.arrays.get(name)
if a2 is not None:
a[n1:] = a2
self.arrays[name] = a
for name, a2 in other.arrays.items():
if name in self.arrays:
continue
a = np.empty((n1 + n2,) + a2.shape[1:], a2.dtype)
a[n1:] = a2
if name == 'masses':
a[:n1] = self.get_masses()[:n1]
else:
a[:n1] = 0
self.set_array(name, a)
return self
__iadd__ = extend
def append(self, atom):
"""Append atom to end."""
self.extend(self.__class__([atom]))
def __getitem__(self, i):
"""Return a subset of the atoms.
i -- scalar integer, list of integers, or slice object
describing which atoms to return.
If i is a scalar, return an Atom object. If i is a list or a
slice, return an Atoms object with the same cell, pbc, and
other associated info as the original Atoms object. The
indices of the constraints will be shuffled so that they match
the indexing in the subset returned.
"""
if isinstance(i, int):
natoms = len(self)
if i < -natoms or i >= natoms:
raise IndexError('Index out of range.')
return Atom(atoms=self, index=i)
import copy
from ase.constraints import FixConstraint
atoms = self.__class__(cell=self._cell, pbc=self._pbc, info=self.info)
# TODO: Do we need to shuffle indices in adsorbate_info too?
atoms.adsorbate_info = self.adsorbate_info
atoms.arrays = {}
for name, a in self.arrays.items():
atoms.arrays[name] = a[i].copy()
# Constraints need to be deepcopied, since we need to shuffle
# the indices
atoms.constraints = copy.deepcopy(self.constraints)
condel = []
for con in atoms.constraints:
if isinstance(con, FixConstraint):
try:
con.index_shuffle(i)
except IndexError:
condel.append(con)
for con in condel:
atoms.constraints.remove(con)
return atoms
def __delitem__(self, i):
from ase.constraints import FixAtoms
check_constraint = np.array([isinstance(c, FixAtoms)
for c in self._constraints])
if (len(self._constraints) > 0 and (not check_constraint.all() or
isinstance(i, list))):
raise RuntimeError('Remove constraint using set_constraint() '
'before deleting atoms.')
mask = np.ones(len(self), bool)
mask[i] = False
for name, a in self.arrays.items():
self.arrays[name] = a[mask]
if len(self._constraints) > 0:
for n in range(len(self._constraints)):
self._constraints[n].delete_atom(range(len(mask))[i])
def pop(self, i=-1):
"""Remove and return atom at index *i* (default last)."""
atom = self[i]
atom.cut_reference_to_atoms()
del self[i]
return atom
def __imul__(self, m):
"""In-place repeat of atoms."""
if isinstance(m, int):
m = (m, m, m)
M = np.prod(m)
n = len(self)
for name, a in self.arrays.items():
self.arrays[name] = np.tile(a, (M,) + (1,) * (len(a.shape) - 1))
positions = self.arrays['positions']
i0 = 0
for m0 in range(m[0]):
for m1 in range(m[1]):
for m2 in range(m[2]):
i1 = i0 + n
positions[i0:i1] += np.dot((m0, m1, m2), self._cell)
i0 = i1
if self.constraints is not None:
self.constraints = [c.repeat(m, n) for c in self.constraints]
self._cell = np.array([m[c] * self._cell[c] for c in range(3)])
return self
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
__mul__ = repeat
def translate(self, displacement):
"""Translate atomic positions.
The displacement argument can be a float, an xyz vector, or an
nx3 array (where n is the number of atoms)."""
self.arrays['positions'] += np.array(displacement)
def center(self, vacuum=None, axis=(0, 1, 2)):
"""Center atoms in unit cell.
Centers the atoms in the unit cell, so there is the same
amount of vacuum on all sides.
vacuum: float (default: None)
If specified, adjust the amount of vacuum when centering.
If vacuum=10.0 there will thus be 10 Angstrom of vacuum
on each side.
axis: int or sequence of ints
Axis or axes to act on. Default: Act on all axes.
"""
# Find the orientations of the faces of the unit cell
c = self.get_cell()
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.sqrt(np.dot(dirs[i], dirs[i])) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if isinstance(axis, int):
axes = (axis,)
else:
axes = axis
p = self.arrays['positions']
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
cosphi = np.dot(c[i], dirs[i]) / np.sqrt(np.dot(c[i], c[i]))
longer[i] = lng / cosphi
shift[i] = shf / cosphi
# Now, do it!
translation = np.zeros(3)
for i in axes:
nowlen = np.sqrt(np.dot(c[i], c[i]))
self._cell[i] *= 1 + longer[i] / nowlen
translation += shift[i] * c[i] / nowlen
self.arrays['positions'] += translation
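# Usage sketch: atoms.center(vacuum=5.0, axis=2) grows the third cell
# vector so there is 5 Angstrom of vacuum on each side along it, while
# atoms.center() with no vacuum only translates the atoms.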
def get_center_of_mass(self, scaled=False):
"""Get the center of mass.
If scaled=True the center of mass in scaled coordinates
is returned."""
m = self.get_masses()
com = np.dot(m, self.arrays['positions']) / m.sum()
if scaled:
return np.linalg.solve(self._cell.T, com)
else:
return com
def get_gyration_tensor(self):
'''Get the gyration tensor.
Returns the eigenvalues of the gyration tensor (not their square roots).
'''
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
natoms = float(self.get_number_of_atoms())  # np.float is deprecated; use the builtin
#initialize elements of the gyration tensor
G11 = G22 = G33 = G12 = G13 = G23 = 0.0
for i in range(len(self)):
x, y, z = positions[i]
G11 += (x**2) * 1. / natoms
G22 += (y**2) * 1. / natoms
G33 += (z**2) * 1. / natoms
G12 += x * y * 1. / natoms
G13 += x * z * 1. / natoms
G23 += y * z * 1. / natoms
G = np.array([[G11, G12, G13],
[G12, G22, G23],
[G13, G23, G33]])
evals, evecs = np.linalg.eig(G)
evals.sort()
return evals
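# The per-atom loop above is equivalent to the vectorized form
# G = np.dot(positions.T, positions) / natoms.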
def get_moments_of_inertia(self, vectors=False):
"""Get the moments of inertia along the principal axes.
The three principal moments of inertia are computed from the
eigenvalues of the symmetric inertial tensor. Periodic boundary
conditions are ignored. Units of the moments of inertia are
amu*angstrom**2.
"""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
masses = self.get_masses()
#initialize elements of the inertial tensor
I11 = I22 = I33 = I12 = I13 = I23 = 0.0
for i in range(len(self)):
x, y, z = positions[i]
m = masses[i]
I11 += m * (y ** 2 + z ** 2)
I22 += m * (x ** 2 + z ** 2)
I33 += m * (x ** 2 + y ** 2)
I12 += -m * x * y
I13 += -m * x * z
I23 += -m * y * z
I = np.array([[I11, I12, I13],
[I12, I22, I23],
[I13, I23, I33]])
evals, evecs = np.linalg.eigh(I)
if vectors:
return evals, evecs.transpose()
else:
return evals
def get_angular_momentum(self):
"""Get total angular momentum with respect to the center of mass."""
com = self.get_center_of_mass()
positions = self.get_positions()
positions -= com # translate center of mass to origin
return np.cross(positions, self.get_momenta()).sum(0)
def rotate(self, v, a=None, center=(0, 0, 0), rotate_cell=False):
"""Rotate atoms based on a vector and an angle, or two vectors.
Parameters:
v:
Vector to rotate the atoms around. Vectors can be given as
strings: 'x', '-x', 'y', ... .
a = None:
Angle that the atoms are rotated around the vector 'v'. If an angle
is not specified, the length of 'v' is used as the angle
(default). The angle can also be a vector and then 'v' is rotated
into 'a'.
center = (0, 0, 0):
The center is kept fixed under the rotation. Use 'COM' to fix
the center of mass, 'COP' to fix the center of positions or
'COU' to fix the center of cell.
rotate_cell = False:
If true the cell is also rotated.
Examples:
Rotate 90 degrees around the z-axis, so that the x-axis is
rotated into the y-axis:
>>> a = pi / 2
>>> atoms.rotate('z', a)
>>> atoms.rotate((0, 0, 1), a)
>>> atoms.rotate('-z', -a)
>>> atoms.rotate((0, 0, a))
>>> atoms.rotate('x', 'y')
"""
norm = np.linalg.norm
v = string2vector(v)
if a is None:
a = norm(v)
if isinstance(a, (float, int)):
v /= norm(v)
c = cos(a)
s = sin(a)
else:
v2 = string2vector(a)
v /= norm(v)
v2 /= norm(v2)
c = np.dot(v, v2)
v = np.cross(v, v2)
s = norm(v)
        # In case *v* and *a* are parallel, np.cross(v, v2) vanishes
# and can't be used as a rotation axis. However, in this
# case any rotation axis perpendicular to v2 will do.
eps = 1e-7
if s < eps:
v = np.cross((0, 0, 1), v2)
if norm(v) < eps:
v = np.cross((1, 0, 0), v2)
assert norm(v) >= eps
elif s > 0:
v /= s
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
p = self.arrays['positions'] - center
self.arrays['positions'][:] = (c * p -
np.cross(p, s * v) +
np.outer(np.dot(p, v), (1.0 - c) * v) +
center)
if rotate_cell:
rotcell = self.get_cell()
rotcell[:] = (c * rotcell -
np.cross(rotcell, s * v) +
np.outer(np.dot(rotcell, v), (1.0 - c) * v))
self.set_cell(rotcell)
def rotate_euler(self, center=(0, 0, 0), phi=0.0, theta=0.0, psi=0.0):
"""Rotate atoms via Euler angles.
        See e.g. http://mathworld.wolfram.com/EulerAngles.html for explanation.
Parameters:
center :
The point to rotate about. A sequence of length 3 with the
coordinates, or 'COM' to select the center of mass, 'COP' to
select center of positions or 'COU' to select center of cell.
phi :
The 1st rotation angle around the z axis.
theta :
Rotation around the x axis.
psi :
2nd rotation around the z axis.
"""
if isinstance(center, str):
if center.lower() == 'com':
center = self.get_center_of_mass()
elif center.lower() == 'cop':
center = self.get_positions().mean(axis=0)
elif center.lower() == 'cou':
center = self.get_cell().sum(axis=0) / 2
else:
raise ValueError('Cannot interpret center')
else:
center = np.array(center)
# First move the molecule to the origin In contrast to MATLAB,
# numpy broadcasts the smaller array to the larger row-wise,
# so there is no need to play with the Kronecker product.
rcoords = self.positions - center
# First Euler rotation about z in matrix form
D = np.array(((cos(phi), sin(phi), 0.),
(-sin(phi), cos(phi), 0.),
(0., 0., 1.)))
# Second Euler rotation about x:
C = np.array(((1., 0., 0.),
(0., cos(theta), sin(theta)),
(0., -sin(theta), cos(theta))))
# Third Euler rotation, 2nd rotation about z:
B = np.array(((cos(psi), sin(psi), 0.),
(-sin(psi), cos(psi), 0.),
(0., 0., 1.)))
# Total Euler rotation
A = np.dot(B, np.dot(C, D))
# Do the rotation
rcoords = np.dot(A, np.transpose(rcoords))
# Move back to the rotation point
self.positions = np.transpose(rcoords) + center
def get_dihedral(self, list):
"""Calculate dihedral angle.
Calculate dihedral angle between the vectors list[0]->list[1]
and list[2]->list[3], where list contains the atomic indexes
in question.
"""
# vector 0->1, 1->2, 2->3 and their normalized cross products:
a = self.positions[list[1]] - self.positions[list[0]]
b = self.positions[list[2]] - self.positions[list[1]]
c = self.positions[list[3]] - self.positions[list[2]]
bxa = np.cross(b, a)
bxa /= np.linalg.norm(bxa)
cxb = np.cross(c, b)
cxb /= np.linalg.norm(cxb)
angle = np.vdot(bxa, cxb)
# check for numerical trouble due to finite precision:
if angle < -1:
angle = -1
if angle > 1:
angle = 1
angle = np.arccos(angle)
if np.vdot(bxa, c) > 0:
angle = 2 * np.pi - angle
return angle
def _masked_rotate(self, center, axis, diff, mask):
# do rotation of subgroup by copying it to temporary atoms object
# and then rotating that
#
# recursive object definition might not be the most elegant thing,
# more generally useful might be a rotation function with a mask?
group = self.__class__()
for i in range(len(self)):
if mask[i]:
group += self[i]
group.translate(-center)
group.rotate(axis, diff)
group.translate(center)
# set positions in original atoms object
j = 0
for i in range(len(self)):
if mask[i]:
self.positions[i] = group[j].position
j += 1
def set_dihedral(self, list, angle, mask=None):
"""
set the dihedral angle between vectors list[0]->list[1] and
list[2]->list[3] by changing the atom indexed by list[3]
if mask is not None, all the atoms described in mask
(read: the entire subgroup) are moved
example: the following defines a very crude
ethane-like molecule and twists one half of it by 30 degrees.
>>> atoms = Atoms('HHCCHH', [[-1, 1, 0], [-1, -1, 0], [0, 0, 0],
[1, 0, 0], [2, 1, 0], [2, -1, 0]])
>>> atoms.set_dihedral([1,2,3,4],7*pi/6,mask=[0,0,0,1,1,1])
"""
# if not provided, set mask to the last atom in the
# dihedral description
if mask is None:
mask = np.zeros(len(self))
mask[list[3]] = 1
        # compute the necessary change in the dihedral angle from its current value
current = self.get_dihedral(list)
diff = angle - current
axis = self.positions[list[2]] - self.positions[list[1]]
center = self.positions[list[2]]
self._masked_rotate(center, axis, diff, mask)
def rotate_dihedral(self, list, angle, mask=None):
"""Rotate dihedral angle.
Complementing the two routines above: rotate a group by a
predefined dihedral angle, starting from its current
configuration
"""
start = self.get_dihedral(list)
self.set_dihedral(list, angle + start, mask)
def get_angle(self, list):
"""Get angle formed by three atoms.
calculate angle between the vectors list[1]->list[0] and
list[1]->list[2], where list contains the atomic indexes in
question."""
# normalized vector 1->0, 1->2:
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
angle = np.vdot(v10, v12)
angle = np.arccos(angle)
return angle
def set_angle(self, list, angle, mask=None):
"""Set angle formed by three atoms.
Sets the angle between vectors list[1]->list[0] and
list[1]->list[2].
Same usage as in set_dihedral."""
# If not provided, set mask to the last atom in the angle description
if mask is None:
mask = np.zeros(len(self))
mask[list[2]] = 1
        # Compute the necessary change in the angle from its current value
current = self.get_angle(list)
diff = angle - current
# Do rotation of subgroup by copying it to temporary atoms object and
# then rotating that
v10 = self.positions[list[0]] - self.positions[list[1]]
v12 = self.positions[list[2]] - self.positions[list[1]]
v10 /= np.linalg.norm(v10)
v12 /= np.linalg.norm(v12)
axis = np.cross(v10, v12)
center = self.positions[list[1]]
self._masked_rotate(center, axis, diff, mask)
def rattle(self, stdev=0.001, seed=42):
"""Randomly displace atoms.
This method adds random displacements to the atomic positions,
taking a possible constraint into account. The random numbers are
drawn from a normal distribution of standard deviation stdev.
For a parallel calculation, it is important to use the same
seed on all processors! """
rs = np.random.RandomState(seed)
positions = self.arrays['positions']
self.set_positions(positions +
rs.normal(scale=stdev, size=positions.shape))
def get_distance(self, a0, a1, mic=False):
"""Return distance between two atoms.
Use mic=True to use the Minimum Image Convention.
"""
R = self.arrays['positions']
D = R[a1] - R[a0]
if mic:
Dr = np.linalg.solve(self._cell.T, D)
D = np.dot(Dr - np.round(Dr) * self._pbc, self._cell)
return np.linalg.norm(D)
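    # A worked minimum-image sketch (illustrative numbers, not part of the
    # API): in a 10 Angstrom cubic cell with pbc along x, atoms at x = 1 and
    # x = 9 are 8 Angstrom apart directly; Dr = 0.8 rounds to 1, so the
    # wrapped separation becomes 0.8 - 1 = -0.2 in scaled coordinates,
    # i.e. 2 Angstrom -- the distance to the nearest periodic image.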
def get_distances(self, No_i, No_list, mic=False):
"""Return distances of atom No.i with a list of atoms
Use mic=True to use the Minimum Image Convention.
"""
R = self.arrays['positions']
D = R[No_list] - R[No_i]
if mic:
Dr = np.linalg.solve(self._cell, D.T)
D = np.dot(self._cell, Dr - (self._pbc * np.round(Dr).T).T).T
return np.sqrt((D**2).sum(1))
def get_distances_all(self, mic=False):
"""Return distances of all of the atoms with all of the atoms.
Use mic=True to use the Minimum Image Convention.
"""
L = len(self)
D = None
for i in range(L):
R = self.arrays['positions']
iD = R[range(i, L)] - R[i]
            if D is None:
D = iD
else:
D = np.append(D, iD, axis=0)
if mic:
Dr = np.linalg.solve(self._cell, D.T)
D = np.dot(self._cell, Dr - (self._pbc * np.round(Dr).T).T).T
results = np.sqrt((D**2).sum(1))
ret = np.empty([L, L])
n = 0
for i in range(L):
for ii in range(i, L):
ret[i, ii] = results[n]
ret[ii, i] = ret[i, ii]
n += 1
return ret
def set_distance(self, a0, a1, distance, fix=0.5, mic=False):
"""Set the distance between two atoms.
Set the distance between atoms *a0* and *a1* to *distance*.
By default, the center of the two atoms will be fixed. Use
*fix=0* to fix the first atom, *fix=1* to fix the second
atom and *fix=0.5* (default) to fix the center of the bond."""
R = self.arrays['positions']
D = R[a1] - R[a0]
if mic:
Dr = np.linalg.solve(self._cell.T, D)
D = np.dot(Dr - np.round(Dr) * self._pbc, self._cell)
x = 1.0 - distance / np.linalg.norm(D)
R[a0] += (x * fix) * D
R[a1] -= (x * (1.0 - fix)) * D
def get_scaled_positions(self):
"""Get positions relative to unit cell.
Atoms outside the unit cell will be wrapped into the cell in
those directions with periodic boundary conditions so that the
scaled coordinates are between zero and one."""
scaled = np.linalg.solve(self._cell.T, self.arrays['positions'].T).T
for i in range(3):
if self._pbc[i]:
# Yes, we need to do it twice.
# See the scaled_positions.py test
scaled[:, i] %= 1.0
scaled[:, i] %= 1.0
return scaled
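    # Why the modulo is applied twice above: for a tiny negative coordinate
    # the first '% 1.0' can round up to exactly 1.0, which lies outside
    # [0, 1); the second '% 1.0' folds that back to 0.0.  A minimal
    # illustration in plain Python:
    #
    # >>> -1e-18 % 1.0
    # 1.0
    # >>> (-1e-18 % 1.0) % 1.0
    # 0.0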
def set_scaled_positions(self, scaled):
"""Set positions relative to unit cell."""
self.arrays['positions'][:] = np.dot(scaled, self._cell)
def get_temperature(self):
"""Get the temperature. in Kelvin"""
ekin = self.get_kinetic_energy() / len(self)
return ekin / (1.5 * units.kB)
def __eq__(self, other):
"""Check for identity of two atoms objects.
Identity means: same positions, atomic numbers, unit cell and
periodic boundary conditions."""
try:
a = self.arrays
b = other.arrays
return (len(self) == len(other) and
(a['positions'] == b['positions']).all() and
(a['numbers'] == b['numbers']).all() and
(self._cell == other.cell).all() and
(self._pbc == other.pbc).all())
except AttributeError:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return eq
else:
return not eq
__hash__ = None
def get_volume(self):
"""Get volume of unit cell."""
return abs(np.linalg.det(self._cell))
def _get_positions(self):
"""Return reference to positions-array for in-place manipulations."""
return self.arrays['positions']
def _set_positions(self, pos):
"""Set positions directly, bypassing constraints."""
self.arrays['positions'][:] = pos
positions = property(_get_positions, _set_positions,
doc='Attribute for direct ' +
'manipulation of the positions.')
def _get_atomic_numbers(self):
"""Return reference to atomic numbers for in-place
manipulations."""
return self.arrays['numbers']
numbers = property(_get_atomic_numbers, set_atomic_numbers,
doc='Attribute for direct ' +
'manipulation of the atomic numbers.')
def _get_cell(self):
"""Return reference to unit cell for in-place manipulations."""
return self._cell
cell = property(_get_cell, set_cell, doc='Attribute for direct ' +
'manipulation of the unit cell.')
def _get_pbc(self):
"""Return reference to pbc-flags for in-place manipulations."""
return self._pbc
pbc = property(_get_pbc, set_pbc,
doc='Attribute for direct manipulation ' +
'of the periodic boundary condition flags.')
def write(self, filename, format=None, **kwargs):
"""Write yourself to a file."""
from ase.io import write
write(filename, self, format, **kwargs)
def edit(self):
"""Modify atoms interactively through ase-gui viewer.
Conflicts leading to undesirable behaviour might arise
when matplotlib has been pre-imported with certain
incompatible backends and while trying to use the
plot feature inside the interactive ag. To circumvent,
please set matplotlib.use('gtk') before calling this
method.
"""
from ase.gui.images import Images
from ase.gui.gui import GUI
images = Images([self])
gui = GUI(images)
gui.run()
# use atoms returned from gui:
# (1) delete all currently available atoms
self.set_constraint()
for z in range(len(self)):
self.pop()
edited_atoms = gui.images.get_atoms(0)
# (2) extract atoms from edit session
self.extend(edited_atoms)
self.set_constraint(edited_atoms._get_constraints())
self.set_cell(edited_atoms.get_cell())
self.set_initial_magnetic_moments(edited_atoms.get_magnetic_moments())
self.set_tags(edited_atoms.get_tags())
return
def string2symbols(s):
"""Convert string to list of chemical symbols."""
n = len(s)
if n == 0:
return []
c = s[0]
if c.isdigit():
i = 1
while i < n and s[i].isdigit():
i += 1
return int(s[:i]) * string2symbols(s[i:])
if c == '(':
p = 0
for i, c in enumerate(s):
if c == '(':
p += 1
elif c == ')':
p -= 1
if p == 0:
break
j = i + 1
while j < n and s[j].isdigit():
j += 1
if j > i + 1:
m = int(s[i + 1:j])
else:
m = 1
return m * string2symbols(s[1:i]) + string2symbols(s[j:])
if c.isupper():
i = 1
if 1 < n and s[1].islower():
i += 1
j = i
while j < n and s[j].isdigit():
j += 1
if j > i:
m = int(s[i:j])
else:
m = 1
return m * [s[:i]] + string2symbols(s[j:])
else:
raise ValueError
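# A minimal sketch of how the recursive parser above expands formulas
# (illustrative calls; leading counts and parenthesized groups both recurse):
#
# >>> string2symbols('H2O')
# ['H', 'H', 'O']
# >>> string2symbols('CH3(CH2)2OH')
# ['C', 'H', 'H', 'H', 'C', 'H', 'H', 'C', 'H', 'H', 'O', 'H']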
def symbols2numbers(symbols):
if isinstance(symbols, str):
symbols = string2symbols(symbols)
numbers = []
for s in symbols:
if isinstance(s, (str, unicode)):
numbers.append(atomic_numbers[s])
else:
numbers.append(s)
return numbers
def string2vector(v):
if isinstance(v, str):
if v[0] == '-':
return -string2vector(v[1:])
w = np.zeros(3)
w['xyz'.index(v)] = 1.0
return w
return np.array(v, float)
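# string2vector maps axis names onto unit vectors, with a leading '-'
# negating the result (illustrative values; exact numpy display may vary):
#
# >>> string2vector('z')
# array([ 0.,  0.,  1.])
# >>> string2vector('-x')
# array([-1., -0., -0.])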
def default(data, dflt):
"""Helper function for setting default values."""
if data is None:
return None
elif isinstance(data, (list, tuple)):
newdata = []
allnone = True
for x in data:
if x is None:
newdata.append(dflt)
else:
newdata.append(x)
allnone = False
if allnone:
return None
return newdata
else:
return data
| PHOTOX/fuase | ase/ase/atoms.py | Python | gpl-2.0 | 58,186 | ["ASE"] | 2bf6dee1748a27cbcbf31813c694ce2d2fc7f17593e8331643768eb065bed7c9 |
'''
*******************************************************************
* File: readSBML.py
* Description:
* Author: HarshaRani
* E-mail: hrani@ncbs.res.in
********************************************************************/
/**********************************************************************
** This program is part of 'MOOSE', the
** Messaging Object Oriented Simulation Environment,
** also known as GENESIS 3 base code.
** copyright (C) 2003-2017 Upinder S. Bhalla. and NCBS
Created : Thu May 12 10:19:00 2016(+0530)
Version
Last-Updated: Mon Apr 3 15:50:00 2017(+0530)
By:
**********************************************************************/
'''
import sys
import os.path
import collections
import moose
from moose.SBML.validation import validateModel
import re
'''
TODO in
-Compartment
--Need to deal with compartment outside
-Molecule
-- mathML: only AssignmentRule is handled, and only partly; I have checked addition and multiplication,
--  still need to do the other operations.
-- In an AssignmentRule one of the variables may be a function; in moose, since assignment is done using a function,
   a function can't get input from another function (model 000740 in l3v1)
-Loading Model from SBML
--Tested 1-30 testcase example model provided by l3v1 and l2v4 std.
---These are the models that worked (sbml testcase)1-6,10,14-15,17-21,23-25,34,35,58
---Need to check
----what to do when boundarycondition is true i.e.,
differential equation derived from the reaction definitions
should not be calculated for the species(7-9,11-13,16)
----kineticLaw: Math function has fraction, ceiling, remainder, power etc. (testcase 28)
----Events to be added 26
----initialAssignment for compartment (testcase 27)
----when stoichiometry is rational number 22
---- For Michaelis-Menten kinetics, Km is often not defined; in most cases it needs to be calculated
'''
foundLibSBML_ = False
try:
import libsbml
foundLibSBML_ = True
except ImportError:
pass
def mooseReadSBML(filepath, loadpath, solver="ee"):
global foundLibSBML_
if not foundLibSBML_:
print('No python-libsbml found.'
              '\nThis module can be installed with one of the following commands:'
'\n\t easy_install python-libsbml'
'\n\t apt-get install python-libsbml'
)
return moose.element('/')
if not os.path.isfile(filepath):
print('%s is not found ' % filepath)
return moose.element('/')
with open(filepath, "r") as filep:
document = libsbml.readSBML(filepath)
tobecontinue = False
tobecontinue = validateModel(document)
if tobecontinue:
level = document.getLevel()
version = document.getVersion()
print(("\n" + "File: " + filepath + " (Level " +
str(level) + ", version " + str(version) + ")"))
model = document.getModel()
if (model is None):
print("No model present.")
return moose.element('/')
else:
print((" model: " + str(model)))
print(("functionDefinitions: " +
str(model.getNumFunctionDefinitions())))
print((" unitDefinitions: " +
str(model.getNumUnitDefinitions())))
print((" compartmentTypes: " +
str(model.getNumCompartmentTypes())))
print((" specieTypes: " +
str(model.getNumSpeciesTypes())))
print((" compartments: " +
str(model.getNumCompartments())))
print((" species: " +
str(model.getNumSpecies())))
print((" parameters: " +
str(model.getNumParameters())))
print((" initialAssignments: " +
str(model.getNumInitialAssignments())))
print((" rules: " +
str(model.getNumRules())))
print((" constraints: " +
str(model.getNumConstraints())))
print((" reactions: " +
str(model.getNumReactions())))
print((" events: " +
str(model.getNumEvents())))
print("\n")
if (model.getNumCompartments() == 0):
return moose.element('/')
else:
baseId = moose.Neutral(loadpath)
basePath = baseId
# All the model will be created under model as
# a thumbrule
basePath = moose.Neutral(baseId.path + '/model')
# Map Compartment's SBML id as key and value is
# list of[ Moose ID and SpatialDimensions ]
global comptSbmlidMooseIdMap
global warning
warning = " "
global msg
msg = " "
groupInfo = {}
modelAnnotaInfo = {}
comptSbmlidMooseIdMap = {}
globparameterIdValue = {}
mapParameter(model, globparameterIdValue)
errorFlag = createCompartment(
basePath, model, comptSbmlidMooseIdMap)
groupInfo = checkGroup(basePath,model)
if errorFlag:
specInfoMap = {}
errorFlag,warning = createSpecies(
basePath, model, comptSbmlidMooseIdMap, specInfoMap, modelAnnotaInfo,groupInfo)
#print(errorFlag,warning)
if errorFlag:
errorFlag = createRules(
model, specInfoMap, globparameterIdValue)
if errorFlag:
errorFlag, msg = createReaction(
model, specInfoMap, modelAnnotaInfo, globparameterIdValue,groupInfo)
getModelAnnotation(
model, baseId, basePath)
if not errorFlag:
print(msg)
# Any time in the middle if SBML does not read then I
# delete everything from model level This is important
# as while reading in GUI the model will show up untill
# built which is not correct print "Deleted rest of the
# model"
moose.delete(basePath)
basePath = moose.Shell('/')
return basePath
else:
print("Validation failed while reading the model.")
return moose.element('/')
def checkGroup(basePath,model):
groupInfo = {}
modelAnnotaInfo = {}
if model.getPlugin("groups") != None:
mplugin = model.getPlugin("groups")
modelgn = mplugin.getNumGroups()
for gindex in range(0, mplugin.getNumGroups()):
p = mplugin.getGroup(gindex)
groupAnnoInfo = {}
groupAnnoInfo = getObjAnnotation(p, modelAnnotaInfo)
if moose.exists(basePath.path+'/'+groupAnnoInfo["Compartment"]):
if not moose.exists(basePath.path+'/'+groupAnnoInfo["Compartment"]+'/'+p.getId()):
moosegrp = moose.Neutral(basePath.path+'/'+groupAnnoInfo["Compartment"]+'/'+p.getId())
else:
moosegrp = moose.element(basePath.path+'/'+groupAnnoInfo["Compartment"]+'/'+p.getId())
moosegrpinfo = moose.Annotator(moosegrp.path+'/info')
moosegrpinfo.color = groupAnnoInfo["bgColor"]
else:
print ("Compartment not found")
if p.getKind() == 2:
if p.getId() not in groupInfo:
#groupInfo[p.getId()]
for gmemIndex in range(0,p.getNumMembers()):
mem = p.getMember(gmemIndex)
if p.getId() in groupInfo:
groupInfo[p.getId()].append(mem.getIdRef())
else:
groupInfo[p.getId()] =[mem.getIdRef()]
return groupInfo
def setupEnzymaticReaction(enz, groupName, enzName,
specInfoMap, modelAnnotaInfo):
enzPool = (modelAnnotaInfo[groupName]["enzyme"])
enzPool = str(idBeginWith(enzPool))
enzParent = specInfoMap[enzPool]["Mpath"]
cplx = (modelAnnotaInfo[groupName]["complex"])
cplx = str(idBeginWith(cplx))
complx = moose.element(specInfoMap[cplx]["Mpath"].path)
enzyme_ = moose.Enz(enzParent.path + '/' + enzName)
moose.move(complx, enzyme_)
moose.connect(enzyme_, "cplx", complx, "reac")
moose.connect(enzyme_, "enz", enzParent, "reac")
sublist = (modelAnnotaInfo[groupName]["substrate"])
prdlist = (modelAnnotaInfo[groupName]["product"])
for si in range(0, len(sublist)):
sl = sublist[si]
sl = str(idBeginWith(sl))
mSId = specInfoMap[sl]["Mpath"]
moose.connect(enzyme_, "sub", mSId, "reac")
for pi in range(0, len(prdlist)):
pl = prdlist[pi]
pl = str(idBeginWith(pl))
mPId = specInfoMap[pl]["Mpath"]
moose.connect(enzyme_, "prd", mPId, "reac")
if (enz.isSetNotes):
pullnotes(enz, enzyme_)
return enzyme_, True
def addSubPrd(reac, reName, type, reactSBMLIdMooseId, specInfoMap):
rctMapIter = {}
if (type == "sub"):
noplusStoichsub = 0
addSubinfo = collections.OrderedDict()
for rt in range(0, reac.getNumReactants()):
rct = reac.getReactant(rt)
sp = rct.getSpecies()
rctMapIter[sp] = rct.getStoichiometry()
if rct.getStoichiometry() > 1:
pass
# print " stoich ",reac.name,rct.getStoichiometry()
noplusStoichsub = noplusStoichsub + rct.getStoichiometry()
for key, value in list(rctMapIter.items()):
key = str(idBeginWith(key))
src = specInfoMap[key]["Mpath"]
des = reactSBMLIdMooseId[reName]["MooseId"]
for s in range(0, int(value)):
moose.connect(des, 'sub', src, 'reac', 'OneToOne')
addSubinfo = {"nSub": noplusStoichsub}
reactSBMLIdMooseId[reName].update(addSubinfo)
else:
noplusStoichprd = 0
addPrdinfo = collections.OrderedDict()
for rt in range(0, reac.getNumProducts()):
rct = reac.getProduct(rt)
sp = rct.getSpecies()
rctMapIter[sp] = rct.getStoichiometry()
if rct.getStoichiometry() > 1:
pass
# print " stoich prd",reac.name,rct.getStoichiometry()
noplusStoichprd = noplusStoichprd + rct.getStoichiometry()
for key, values in list(rctMapIter.items()):
# src ReacBase
src = reactSBMLIdMooseId[reName]["MooseId"]
key = parentSp = str(idBeginWith(key))
des = specInfoMap[key]["Mpath"]
for i in range(0, int(values)):
moose.connect(src, 'prd', des, 'reac', 'OneToOne')
addPrdinfo = {"nPrd": noplusStoichprd}
reactSBMLIdMooseId[reName].update(addPrdinfo)
def populatedict(annoDict, label, value):
if label in annoDict:
        annoDict[label].update({value})
else:
annoDict[label] = {value}
def getModelAnnotation(obj, baseId, basepath):
annotationNode = obj.getAnnotation()
if annotationNode is not None:
numchild = annotationNode.getNumChildren()
for child_no in range(0, numchild):
childNode = annotationNode.getChild(child_no)
if (childNode.getPrefix() ==
"moose" and childNode.getName() == "ModelAnnotation"):
num_gchildren = childNode.getNumChildren()
for gchild_no in range(0, num_gchildren):
grandChildNode = childNode.getChild(gchild_no)
nodeName = grandChildNode.getName()
if (grandChildNode.getNumChildren() == 1):
baseinfo = moose.Annotator(baseId.path + '/info')
baseinfo.modeltype = "xml"
if nodeName == "runTime":
runtime = float(
(grandChildNode.getChild(0).toXMLString()))
baseinfo.runtime = runtime
if nodeName == "solver":
solver = (grandChildNode.getChild(0).toXMLString())
baseinfo.solver = solver
if(nodeName == "plots"):
plotValue = (
grandChildNode.getChild(0).toXMLString())
p = moose.element(baseId)
datapath = moose.element(baseId).path + "/data"
if not moose.exists(datapath):
datapath = moose.Neutral(baseId.path + "/data")
graph = moose.Neutral(
datapath.path + "/graph_0")
plotlist = plotValue.split(";")
tablelistname = []
for plots in plotlist:
plots = plots.replace(" ", "")
plotorg = plots
if( moose.exists(basepath.path + plotorg) and isinstance(moose.element(basepath.path+plotorg),moose.PoolBase)) :
plotSId = moose.element(
basepath.path + plotorg)
# plotorg = convertSpecialChar(plotorg)
plot2 = plots.replace('/', '_')
plot3 = plot2.replace('[', '_')
plotClean = plot3.replace(']', '_')
plotName = plotClean + ".conc"
fullPath = graph.path + '/' + \
plotName.replace(" ", "")
# If table exist with same name then
# its not created
if not fullPath in tablelistname:
tab = moose.Table2(fullPath)
tablelistname.append(fullPath)
moose.connect(tab, "requestOut", plotSId, "getConc")
def getObjAnnotation(obj, modelAnnotationInfo):
name = obj.getId()
name = name.replace(" ", "_space_")
# modelAnnotaInfo= {}
annotateMap = {}
if (obj.getAnnotation() is not None):
annoNode = obj.getAnnotation()
for ch in range(0, annoNode.getNumChildren()):
childNode = annoNode.getChild(ch)
if (childNode.getPrefix() == "moose" and (childNode.getName() in["ModelAnnotation","EnzymaticReaction","GroupAnnotation"])):
sublist = []
for gch in range(0, childNode.getNumChildren()):
grandChildNode = childNode.getChild(gch)
nodeName = grandChildNode.getName()
nodeValue = ""
if (grandChildNode.getNumChildren() == 1):
nodeValue = grandChildNode.getChild(0).toXMLString()
else:
print(
"Error: expected exactly ONE child of ", nodeName)
if nodeName == "xCord":
annotateMap[nodeName] = nodeValue
if nodeName == "yCord":
annotateMap[nodeName] = nodeValue
if nodeName == "bgColor":
annotateMap[nodeName] = nodeValue
if nodeName == "textColor":
annotateMap[nodeName] = nodeValue
if nodeName == "Group":
annotateMap[nodeName] = nodeValue
if nodeName == "Compartment":
annotateMap[nodeName] = nodeValue
return annotateMap
def getEnzAnnotation(obj, modelAnnotaInfo, rev,
globparameterIdValue, specInfoMap):
name = obj.getId()
name = name.replace(" ", "_space_")
# modelAnnotaInfo= {}
annotateMap = {}
if (obj.getAnnotation() is not None):
annoNode = obj.getAnnotation()
for ch in range(0, annoNode.getNumChildren()):
childNode = annoNode.getChild(ch)
if (childNode.getPrefix() ==
"moose" and childNode.getName() == "EnzymaticReaction"):
sublist = []
for gch in range(0, childNode.getNumChildren()):
grandChildNode = childNode.getChild(gch)
nodeName = grandChildNode.getName()
nodeValue = ""
if (grandChildNode.getNumChildren() == 1):
nodeValue = grandChildNode.getChild(0).toXMLString()
else:
print(
"Error: expected exactly ONE child of ", nodeName)
if nodeName == "enzyme":
populatedict(annotateMap, "enzyme", nodeValue)
elif nodeName == "complex":
populatedict(annotateMap, "complex", nodeValue)
elif (nodeName == "substrates"):
populatedict(annotateMap, "substrates", nodeValue)
elif (nodeName == "product"):
populatedict(annotateMap, "product", nodeValue)
elif (nodeName == "groupName"):
populatedict(annotateMap, "grpName", nodeValue)
elif (nodeName == "stage"):
populatedict(annotateMap, "stage", nodeValue)
elif (nodeName == "Group"):
populatedict(annotateMap, "group", nodeValue)
elif (nodeName == "xCord"):
populatedict(annotateMap, "xCord", nodeValue)
elif (nodeName == "yCord"):
populatedict(annotateMap, "yCord", nodeValue)
groupName = ""
if 'grpName' in annotateMap:
groupName = list(annotateMap["grpName"])[0]
klaw = obj.getKineticLaw()
mmsg = ""
errorFlag, mmsg, k1, k2 = getKLaw(
obj, klaw, rev, globparameterIdValue, specInfoMap)
if 'substrates' in annotateMap:
sublist = list(annotateMap["substrates"])
else:
sublist = {}
if 'product' in annotateMap:
prdlist = list(annotateMap["product"])
else:
prdlist = {}
if list(annotateMap["stage"])[0] == '1':
if groupName in modelAnnotaInfo:
modelAnnotaInfo[groupName].update(
{"enzyme": list(annotateMap["enzyme"])[0],
"stage": list(annotateMap["stage"])[0],
"substrate": sublist,
"k1": k1,
"k2": k2
}
)
else:
modelAnnotaInfo[groupName] = {
"enzyme": list(annotateMap["enzyme"])[0],
"stage": list(annotateMap["stage"])[0],
"substrate": sublist,
"k1": k1,
"k2": k2
#"group" : list(annotateMap["Group"])[0],
#"xCord" : list(annotateMap["xCord"])[0],
#"yCord" : list(annotateMap["yCord"]) [0]
}
elif list(annotateMap["stage"])[0] == '2':
if groupName in modelAnnotaInfo:
stage = int(modelAnnotaInfo[groupName][
"stage"]) + int(list(annotateMap["stage"])[0])
modelAnnotaInfo[groupName].update(
{"complex": list(annotateMap["complex"])[0],
"product": prdlist,
"stage": [stage],
"k3": k1
}
)
        else:
            modelAnnotaInfo[groupName] = {
                "complex": list(annotateMap["complex"])[0],
                "product": prdlist,
                # first sighting of this group is at stage 2
                "stage": [int(list(annotateMap["stage"])[0])],
                "k3": k1
            }
return(groupName)
def createReaction(model, specInfoMap, modelAnnotaInfo, globparameterIdValue,groupInfo):
# print " reaction "
# Things done for reaction
    # --Reaction is not created if both substrate and product are missing
    # --Reaction is created under the first substrate's compartment; if no substrate is found, then under the product's
    # --Reaction is created if only the substrate or only the product is missing, though at run time in the GUI at least I have stopped it
# ToDo
# -- I need to check here if any substance/product is if ( constant == true && bcondition == false)
# cout <<"The species "<< name << " should not appear in reactant or product as per sbml Rules"<< endl;
errorFlag = True
reactSBMLIdMooseId = {}
msg = ""
rName = ""
reaction_ = None
for ritem in range(0, model.getNumReactions()):
reactionCreated = False
groupName = ""
rName = ""
rId = ""
reac = model.getReaction(ritem)
group = ""
reacAnnoInfo = {}
reacAnnoInfo = getObjAnnotation(reac, modelAnnotaInfo)
# if "Group" in reacAnnoInfo:
# group = reacAnnoInfo["Group"]
if (reac.isSetId()):
rId = reac.getId()
            groups = [k for k, v in groupInfo.items() if rId in v]
if groups:
group = groups[0]
if (reac.isSetName()):
rName = reac.getName()
rName = rName.replace(" ", "_space_")
if not(rName):
rName = rId
rev = reac.getReversible()
fast = reac.getFast()
if (fast):
print(
" warning: for now fast attribute is not handled \"",
rName,
"\"")
if (reac.getAnnotation() is not None):
groupName = getEnzAnnotation(
reac, modelAnnotaInfo, rev, globparameterIdValue, specInfoMap)
if (groupName != "" and list(
modelAnnotaInfo[groupName]["stage"])[0] == 3):
reaction_, reactionCreated = setupEnzymaticReaction(
reac, groupName, rName, specInfoMap, modelAnnotaInfo)
reaction_.k3 = modelAnnotaInfo[groupName]['k3']
reaction_.k2 = modelAnnotaInfo[groupName]['k2']
reaction_.concK1 = modelAnnotaInfo[groupName]['k1']
if reactionCreated:
if (reac.isSetNotes):
pullnotes(reac, reaction_)
reacAnnoInfo = {}
reacAnnoInfo = getObjAnnotation(reac, modelAnnotaInfo)
if reacAnnoInfo:
if not moose.exists(reaction_.path + '/info'):
reacInfo = moose.Annotator(reaction_.path + '/info')
else:
reacInfo = moose.element(reaction_.path + '/info')
for k, v in list(reacAnnoInfo.items()):
if k == 'xCord':
reacInfo.x = float(v)
elif k == 'yCord':
reacInfo.y = float(v)
elif k == 'bgColor':
reacInfo.color = v
else:
reacInfo.textColor = v
elif(groupName == ""):
numRcts = reac.getNumReactants()
numPdts = reac.getNumProducts()
nummodifiers = reac.getNumModifiers()
if not (numRcts and numPdts):
print("Warning: %s" %(rName)," : Substrate or Product is missing, we will be skiping creating this reaction in MOOSE")
reactionCreated = False
elif (reac.getNumModifiers() > 0):
reactionCreated, reaction_ = setupMMEnzymeReaction(
reac, rName, specInfoMap, reactSBMLIdMooseId, modelAnnotaInfo, model, globparameterIdValue)
# elif (reac.getNumModifiers() > 0):
# reactionCreated = setupMMEnzymeReaction(reac,rName,specInfoMap,reactSBMLIdMooseId,modelAnnotaInfo,model,globparameterIdValue)
# reaction_ = reactSBMLIdMooseId['classical']['MooseId']
# reactionType = "MMEnz"
elif (numRcts):
# In moose, reactions compartment are decided from first Substrate compartment info
# substrate is missing then check for product
if (reac.getNumReactants()):
react = reac.getReactant(reac.getNumReactants() - 1)
# react = reac.getReactant(0)
sp = react.getSpecies()
sp = str(idBeginWith(sp))
speCompt = specInfoMap[sp]["comptId"].path
if group:
if moose.exists(speCompt+'/'+group):
speCompt = speCompt+'/'+group
else:
speCompt = (moose.Neutral(speCompt+'/'+group)).path
reaction_ = moose.Reac(speCompt + '/' + rName)
reactionCreated = True
reactSBMLIdMooseId[rName] = {
"MooseId": reaction_, "className ": "reaction"}
elif (numPdts):
# In moose, reactions compartment are decided from first Substrate compartment info
# substrate is missing then check for product
if (reac.getNumProducts()):
                react = reac.getProduct(0)
sp = react.getSpecies()
sp = str(idBeginWith(sp))
speCompt = specInfoMap[sp]["comptId"].path
reaction_ = moose.Reac(speCompt + '/' + rName)
reactionCreated = True
                reactSBMLIdMooseId[rName] = {  # keyed by rName so addSubPrd can find it
"MooseId": reaction_, "className": "reaction"}
if reactionCreated:
if (reac.isSetNotes):
pullnotes(reac, reaction_)
reacAnnoInfo = {}
reacAnnoInfo = getObjAnnotation(reac, modelAnnotaInfo)
if reacAnnoInfo:
if not moose.exists(reaction_.path + '/info'):
reacInfo = moose.Annotator(reaction_.path + '/info')
else:
reacInfo = moose.element(reaction_.path + '/info')
for k, v in list(reacAnnoInfo.items()):
if k == 'xCord':
reacInfo.x = float(v)
elif k == 'yCord':
reacInfo.y = float(v)
elif k == 'bgColor':
reacInfo.color = v
else:
reacInfo.textColor = v
addSubPrd(reac, rName, "sub", reactSBMLIdMooseId, specInfoMap)
addSubPrd(reac, rName, "prd", reactSBMLIdMooseId, specInfoMap)
if reac.isSetKineticLaw():
klaw = reac.getKineticLaw()
mmsg = ""
errorFlag, mmsg, kfvalue, kbvalue = getKLaw(
model, klaw, rev, globparameterIdValue, specInfoMap)
if not errorFlag:
msg = "Error while importing reaction \"" + \
rName + "\"\n Error in kinetics law "
if mmsg != "":
msg = msg + mmsg
return(errorFlag, msg)
else:
# print " reactSBMLIdMooseId
# ",reactSBMLIdMooseId[rName]["nSub"], " prd
# ",reactSBMLIdMooseId[rName]["nPrd"]
if reaction_.className == "Reac":
subn = reactSBMLIdMooseId[rName]["nSub"]
prdn = reactSBMLIdMooseId[rName]["nPrd"]
reaction_.Kf = kfvalue # * pow(1e-3,subn-1)
reaction_.Kb = kbvalue # * pow(1e-3,prdn-1)
elif reaction_.className == "MMenz":
reaction_.kcat = kfvalue
reaction_.Km = kbvalue
return (errorFlag, msg)
def getKLaw(model, klaw, rev, globparameterIdValue, specMapList):
parmValueMap = {}
amt_Conc = "amount"
value = 0.0
    np = klaw.getNumParameters()
for pi in range(0, np):
p = klaw.getParameter(pi)
if (p.isSetId()):
ids = p.getId()
if (p.isSetValue()):
value = p.getValue()
parmValueMap[ids] = value
ruleMemlist = []
flag = getMembers(klaw.getMath(), ruleMemlist)
index = 0
kfparm = ""
kbparm = ""
kfvalue = 0
kbvalue = 0
kfp = None
kbp = None
mssgstr = ""
for i in ruleMemlist:
if i in parmValueMap or i in globparameterIdValue:
if index == 0:
kfparm = i
if i in parmValueMap:
kfvalue = parmValueMap[i]
kfp = klaw.getParameter(kfparm)
else:
kfvalue = globparameterIdValue[i]
kfp = model.getParameter(kfparm)
elif index == 1:
kbparm = i
if i in parmValueMap:
kbvalue = parmValueMap[i]
kbp = klaw.getParameter(kbparm)
else:
kbvalue = globparameterIdValue[i]
kbp = model.getParameter(kbparm)
index += 1
elif not (i in specMapList or i in comptSbmlidMooseIdMap):
mssgstr = "\"" + i + "\" is not defined "
            return (False, mssgstr, kfvalue, kbvalue)
if kfp != "":
# print " unit set for rate law kfp ",kfparm, " ",kfp.isSetUnits()
if kfp.isSetUnits():
kfud = kfp.getDerivedUnitDefinition()
# print " kfud ",kfud
if kbp != "":
pass
# print " unit set for rate law kbp ",kbparm, " ",kbp.isSetUnits()
return (True, mssgstr, kfvalue, kbvalue)
def getMembers(node, ruleMemlist):
if node.getType() == libsbml.AST_PLUS:
if node.getNumChildren() == 0:
print ("0")
return False
getMembers(node.getChild(0), ruleMemlist)
for i in range(1, node.getNumChildren()):
# addition
getMembers(node.getChild(i), ruleMemlist)
elif node.getType() == libsbml.AST_REAL:
# This will be constant
pass
elif node.getType() == libsbml.AST_NAME:
# This will be the ci term"
ruleMemlist.append(node.getName())
elif node.getType() == libsbml.AST_MINUS:
if node.getNumChildren() == 0:
print("0")
return False
else:
lchild = node.getLeftChild()
getMembers(lchild, ruleMemlist)
rchild = node.getRightChild()
getMembers(rchild, ruleMemlist)
elif node.getType() == libsbml.AST_DIVIDE:
if node.getNumChildren() == 0:
print("0")
return False
else:
lchild = node.getLeftChild()
getMembers(lchild, ruleMemlist)
rchild = node.getRightChild()
getMembers(rchild, ruleMemlist)
elif node.getType() == libsbml.AST_TIMES:
if node.getNumChildren() == 0:
print ("0")
return False
getMembers(node.getChild(0), ruleMemlist)
for i in range(1, node.getNumChildren()):
# Multiplication
getMembers(node.getChild(i), ruleMemlist)
elif node.getType() == libsbml.AST_FUNCTION_POWER:
pass
else:
print(" this case need to be handled", node.getType())
# if len(ruleMemlist) > 2:
# print "Sorry! for now MOOSE cannot handle more than 2 parameters"
# return True
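# A sketch of what the AST walk above collects (illustrative kinetic law,
# not from any particular model): for "kf*S1 - kb*P1" the AST_NAME leaves
# are visited left to right, so ruleMemlist becomes
# ['kf', 'S1', 'kb', 'P1']; getKLaw then treats the first two names that
# resolve to parameters as the forward and backward rate constants.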
def createRules(model, specInfoMap, globparameterIdValue):
for r in range(0, model.getNumRules()):
rule = model.getRule(r)
comptvolume = []
if (rule.isAssignment()):
rule_variable = rule.getVariable()
rule_variable = parentSp = str(idBeginWith(rule_variable))
poolList = specInfoMap[rule_variable]["Mpath"].path
poolsCompt = findCompartment(moose.element(poolList))
if not isinstance(moose.element(poolsCompt), moose.ChemCompt):
return -2
else:
if poolsCompt.name not in comptvolume:
comptvolume.append(poolsCompt.name)
funcId = moose.Function(poolList + '/func')
objclassname = moose.element(poolList).className
if objclassname == "BufPool" or objclassname == "ZombieBufPool":
moose.connect(funcId, 'valueOut', poolList, 'setN')
elif objclassname == "Pool" or objclassname == "ZombiePool":
# moose.connect( funcId, 'valueOut', poolList ,'increament' )
moose.connect(funcId, 'valueOut', poolList, 'setN')
elif objclassname == "Reac" or objclassname == "ZombieReac":
moose.connect(funcId, 'valueOut', poolList, 'setNumkf')
ruleMath = rule.getMath()
ruleMemlist = []
speFunXterm = {}
getMembers(ruleMath, ruleMemlist)
for i in ruleMemlist:
if (i in specInfoMap):
i = str(idBeginWith(i))
specMapList = specInfoMap[i]["Mpath"]
poolsCompt = findCompartment(moose.element(specMapList))
if not isinstance(moose.element(
poolsCompt), moose.ChemCompt):
return -2
else:
if poolsCompt.name not in comptvolume:
comptvolume.append(poolsCompt.name)
numVars = funcId.numVars
x = funcId.path + '/x[' + str(numVars) + ']'
speFunXterm[i] = 'x' + str(numVars)
moose.connect(specMapList, 'nOut', x, 'input')
funcId.numVars = numVars + 1
elif not(i in globparameterIdValue):
print("check the variable type ", i)
exp = rule.getFormula()
for mem in ruleMemlist:
if (mem in specInfoMap):
#exp1 = exp.replace(mem, str(speFunXterm[mem]))
exp1 = re.sub(r'\b%s\b'% (mem), speFunXterm[mem], exp)
exp = exp1
elif(mem in globparameterIdValue):
#exp1 = exp.replace(mem, str(globparameterIdValue[mem]))
                    exp1 = re.sub(r'\b%s\b' % (mem), str(globparameterIdValue[mem]), exp)
exp = exp1
else:
print("Math expression need to be checked")
exp = exp.replace(" ", "")
funcId.expr = exp.strip(" \t\n\r")
# return True
elif(rule.isRate()):
print(
"Warning : For now this \"",
rule.getVariable(),
"\" rate Rule is not handled in moose ")
# return False
elif (rule.isAlgebraic()):
print("Warning: For now this ", rule.getVariable(),
" Algebraic Rule is not handled in moose")
# return False
        if len(comptvolume) > 1:
            warning = "\nFunction " + moose.element(poolList).name + \
                " has input from a different compartment, which is deprecated in moose; running this model may cause moose to crash"
return True
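# A sketch of what createRules builds (illustrative rule, hypothetical ids):
# for an SBML assignment rule S3 = k1*S1 + S2 with global parameter
# k1 = 0.5, the loop above connects S1 and S2 into the moose Function as
# x0 and x1, the re.sub pass rewrites the formula to "0.5*x0+x1", and the
# function's valueOut then drives S3 through setN.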
def pullnotes(sbmlId, mooseId):
if sbmlId.getNotes() is not None:
tnodec = ((sbmlId.getNotes()).getChild(0)).getChild(0)
notes = tnodec.getCharacters()
notes = notes.strip(' \t\n\r')
objPath = mooseId.path + "/info"
if not moose.exists(objPath):
objInfo = moose.Annotator(mooseId.path + '/info')
else:
objInfo = moose.element(mooseId.path + '/info')
objInfo.notes = notes
def createSpecies(basePath, model, comptSbmlidMooseIdMap,
specInfoMap, modelAnnotaInfo,groupInfo):
# ToDo:
# - Need to add group name if exist in pool
# - Notes
# print "species "
if not (model.getNumSpecies()):
return (False,"number of species is zero")
else:
for sindex in range(0, model.getNumSpecies()):
spe = model.getSpecies(sindex)
group = ""
specAnnoInfo = {}
specAnnoInfo = getObjAnnotation(spe, modelAnnotaInfo)
# if "Group" in specAnnoInfo:
# group = specAnnoInfo["Group"]
sName = None
sId = spe.getId()
            groups = [k for k, v in groupInfo.items() if sId in v]
if groups:
group = groups[0]
if spe.isSetName():
sName = spe.getName()
sName = sName.replace(" ", "_space_")
if spe.isSetCompartment():
comptId = spe.getCompartment()
if not(sName):
sName = sId
constant = spe.getConstant()
boundaryCondition = spe.getBoundaryCondition()
comptEl = comptSbmlidMooseIdMap[comptId]["MooseId"].path
hasonlySubUnit = spe.getHasOnlySubstanceUnits()
# "false": is {unit of amount}/{unit of size} (i.e., concentration or density).
# "true": then the value is interpreted as having a unit of amount only.
if group:
if moose.exists(comptEl+'/'+group):
comptEl = comptEl+'/'+group
else:
comptEl = (moose.Neutral(comptEl+'/'+group)).path
if (boundaryCondition):
poolId = moose.BufPool(comptEl + '/' + sName)
else:
poolId = moose.Pool(comptEl + '/' + sName)
if (spe.isSetNotes):
pullnotes(spe, poolId)
if specAnnoInfo:
if not moose.exists(poolId.path + '/info'):
poolInfo = moose.Annotator(poolId.path + '/info')
else:
poolInfo = moose.element(poolId.path + '/info')
for k, v in list(specAnnoInfo.items()):
if k == 'xCord':
poolInfo.x = float(v)
elif k == 'yCord':
poolInfo.y = float(v)
elif k == 'bgColor':
poolInfo.color = v
else:
poolInfo.textColor = v
specInfoMap[sId] = {
"Mpath": poolId,
"const": constant,
"bcondition": boundaryCondition,
"hassubunit": hasonlySubUnit,
"comptId": comptSbmlidMooseIdMap[comptId]["MooseId"]}
initvalue = 0.0
unitfactor, unitset, unittype = transformUnit(spe, hasonlySubUnit)
if hasonlySubUnit == True:
if spe.isSetInitialAmount():
initvalue = spe.getInitialAmount()
# populating nInit, will automatically calculate the
# concInit.
if not (unitset):
# if unit is not set,
# default unit is assumed as Mole in SBML
unitfactor = pow(6.0221409e23, 1)
unittype = "Mole"
initvalue = initvalue * unitfactor
elif spe.isSetInitialConcentration():
initvalue = spe.getInitialConcentration()
print(" Since hasonlySubUnit is true and concentration is set units are not checked")
poolId.nInit = initvalue
elif hasonlySubUnit == False:
# ToDo : check 00976
if spe.isSetInitialAmount():
initvalue = spe.getInitialAmount()
# initAmount is set we need to convert to concentration
initvalue = initvalue / comptSbmlidMooseIdMap[comptId]["size"]
elif spe.isSetInitialConcentration():
initvalue = spe.getInitialConcentration()
if not unitset:
# print " unit is not set"
unitfactor = pow(10, -3)
initvalue = initvalue * unitfactor
poolId.concInit = initvalue
else:
nr = model.getNumRules()
found = False
for nrItem in range(0, nr):
rule = model.getRule(nrItem)
assignRule = rule.isAssignment()
if (assignRule):
rule_variable = rule.getVariable()
if (rule_variable == sId):
found = True
break
if not (found):
                print(
                    "Invalid SBML: Either initialConcentration or initialAmount must be set, or the species must be defined by an assignmentRule, but neither holds for ",
                    sName)
                return (False, "Invalid SBML: Either initialConcentration or initialAmount must be set, or the species must be defined by an assignmentRule, but neither holds for " + sName)
return (True," ")
def transformUnit(unitForObject, hasonlySubUnit=False):
# print "unit
# ",UnitDefinition.printUnits(unitForObject.getDerivedUnitDefinition())
unitset = False
unittype = None
if (unitForObject.getDerivedUnitDefinition()):
unit = (unitForObject.getDerivedUnitDefinition())
unitnumber = int(unit.getNumUnits())
if unitnumber > 0:
for ui in range(0, unit.getNumUnits()):
lvalue = 1.0
unitType = unit.getUnit(ui)
if(unitType.isLitre()):
exponent = unitType.getExponent()
multiplier = unitType.getMultiplier()
scale = unitType.getScale()
offset = unitType.getOffset()
# units for compartment is Litre but MOOSE compartment is
# m3
scale = scale - 3
lvalue *= pow(multiplier * pow(10.0, scale),
exponent) + offset
unitset = True
unittype = "Litre"
return (lvalue, unitset, unittype)
elif(unitType.isMole()):
exponent = unitType.getExponent()
multiplier = unitType.getMultiplier()
scale = unitType.getScale()
offset = unitType.getOffset()
# if hasOnlySubstanceUnit = True, then assuming Amount
if hasonlySubUnit == True:
lvalue *= pow(multiplier *
pow(10.0, scale), exponent) + offset
# If SBML units are in mole then convert to number by
# multiplying with avogadro's number
lvalue = lvalue * pow(6.0221409e23, 1)
elif hasonlySubUnit == False:
# Pool units in moose is mM
if scale > 0:
lvalue *= pow(multiplier * pow(10.0,
scale - 3), exponent) + offset
elif scale <= 0:
lvalue *= pow(multiplier * pow(10.0,
scale + 3), exponent) + offset
unitset = True
unittype = "Mole"
return (lvalue, unitset, unittype)
elif(unitType.isItem()):
exponent = unitType.getExponent()
multiplier = unitType.getMultiplier()
scale = unitType.getScale()
offset = unitType.getOffset()
# if hasOnlySubstanceUnit = True, then assuming Amount
if hasonlySubUnit == True:
# If SBML units are in Item then amount is populate as
# its
lvalue *= pow(multiplier *
pow(10.0, scale), exponent) + offset
if hasonlySubUnit == False:
                        # hasonlySubUnit is False, which is assumed to mean
                        # concentration; here Item is converted to mole by
                        # dividing by Avogadro's number, and the initial
                        # value is divided by the volume
lvalue *= pow(multiplier *
pow(10.0, scale), exponent) + offset
lvalue = lvalue / pow(6.0221409e23, 1)
unitset = True
unittype = "Item"
return (lvalue, unitset, unittype)
else:
lvalue = 1.0
return (lvalue, unitset, unittype)
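# A worked example of the conversion above (illustrative unit fields, not
# from any particular model): a species measured in millimole has
# multiplier=1, scale=-3, exponent=1; with hasonlySubUnit=False this takes
# the scale <= 0 branch, so lvalue = pow(1 * 10**(-3 + 3), 1) = 1.0,
# i.e. the millimolar scale maps one-to-one onto MOOSE's mM pool units.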
def createCompartment(basePath, model, comptSbmlidMooseIdMap):
    # ToDoList : Check what should be done when the spatial dimension is 2
    # or 1, i.e. area or length
if not(model.getNumCompartments()):
        return False
else:
for c in range(0, model.getNumCompartments()):
compt = model.getCompartment(c)
# print("Compartment " + str(c) + ": "+ UnitDefinition.printUnits(compt.getDerivedUnitDefinition()))
msize = 0.0
unitfactor = 1.0
sbmlCmptId = None
name = None
if (compt.isSetId()):
sbmlCmptId = compt.getId()
if (compt.isSetName()):
name = compt.getName()
name = name.replace(" ", "_space")
if (compt.isSetOutside()):
outside = compt.getOutside()
if (compt.isSetSize()):
msize = compt.getSize()
if msize == 1:
print("Compartment size is 1")
dimension = compt.getSpatialDimensions()
if dimension == 3:
unitfactor, unitset, unittype = transformUnit(compt)
else:
                print(
                    " Currently we don't deal with spatial dimensions less than 3, or with units of area or length")
return False
if not(name):
name = sbmlCmptId
mooseCmptId = moose.CubeMesh(basePath.path + '/' + name)
mooseCmptId.volume = (msize * unitfactor)
comptSbmlidMooseIdMap[sbmlCmptId] = {
"MooseId": mooseCmptId, "spatialDim": dimension, "size": msize}
return True
def setupMMEnzymeReaction(reac, rName, specInfoMap, reactSBMLIdMooseId,
modelAnnotaInfo, model, globparameterIdValue):
msg = ""
errorFlag = ""
numRcts = reac.getNumReactants()
numPdts = reac.getNumProducts()
nummodifiers = reac.getNumModifiers()
if (nummodifiers):
parent = reac.getModifier(0)
parentSp = parent.getSpecies()
parentSp = str(idBeginWith(parentSp))
enzParent = specInfoMap[parentSp]["Mpath"]
MMEnz = moose.MMenz(enzParent.path + '/' + rName)
moose.connect(enzParent, "nOut", MMEnz, "enzDest")
reactionCreated = True
reactSBMLIdMooseId[rName] = {"MooseId": MMEnz, "className": "MMEnz"}
if reactionCreated:
if (reac.isSetNotes):
pullnotes(reac, MMEnz)
reacAnnoInfo = {}
reacAnnoInfo = getObjAnnotation(reac, modelAnnotaInfo)
if reacAnnoInfo:
if not moose.exists(MMEnz.path + '/info'):
reacInfo = moose.Annotator(MMEnz.path + '/info')
else:
reacInfo = moose.element(MMEnz.path + '/info')
for k, v in list(reacAnnoInfo.items()):
if k == 'xCord':
reacInfo.x = float(v)
elif k == 'yCord':
reacInfo.y = float(v)
elif k == 'bgColor':
reacInfo.color = v
else:
reacInfo.textColor = v
return(reactionCreated, MMEnz)
def mapParameter(model, globparameterIdValue):
for pm in range(0, model.getNumParameters()):
prm = model.getParameter(pm)
if (prm.isSetId()):
parid = prm.getId()
value = 0.0
if (prm.isSetValue()):
value = prm.getValue()
globparameterIdValue[parid] = value
def idBeginWith(name):
changedName = name
if name[0].isdigit():
changedName = "_" + name
return changedName
def convertSpecialChar(str1):
d = {"&": "_and", "<": "_lessthan_", ">": "_greaterthan_", "BEL": "°", "-": "_minus_", "'": "_prime_",
"+": "_plus_", "*": "_star_", "/": "_slash_", "(": "_bo_", ")": "_bc_",
"[": "_sbo_", "]": "_sbc_", ".": "_dot_", " ": "_"
}
for i, j in list(d.items()):
str1 = str1.replace(i, j)
return str1
def mooseIsInstance(element, classNames):
return moose.element(element).__class__.__name__ in classNames
def findCompartment(element):
while not mooseIsInstance(element, ["CubeMesh", "CyclMesh"]):
element = element.parent
return element
if __name__ == "__main__":
try:
sys.argv[1]
except IndexError:
print("Filename or path not given")
exit(0)
else:
filepath = sys.argv[1]
if not os.path.exists(filepath):
print("Filename or path does not exist",filepath)
else:
try:
sys.argv[2]
except :
modelpath = filepath[filepath.rfind('/'):filepath.find('.')]
else:
modelpath = sys.argv[2]
read = mooseReadSBML(filepath, modelpath)
if read:
print(" Model read to moose path "+ modelpath)
else:
print(" could not read SBML to MOOSE")
| subhacom/moose-core | python/moose/SBML/readSBML.py | Python | gpl-3.0 | 51,504 | ["Avogadro", "MOOSE"] | f639bd911504baac1337cff9df43e2e749f9278ae86490f7016f467c7acdf53f |
from wand import image
from boxmath import *
FILTER="gaussian"
def load():
return image.Image(filename="chrysanthemum.jpg")
def chained():
i = load()
i.resize(629, 483, filter=FILTER)
i.crop(0, 0, 480, 480)
i.resize(1000, 1000, filter=FILTER)
i.save(filename="flower-chained.jpg")
def mb():
i = load()
b = box(i.width, i.height)
b = resize(b, 629, 483)
b = crop(b, 0, 0, 480, 480)
b = resize(b, 1000, 1000)
def resizer(img, w, h):
img.resize(int(w), int(h), filter=FILTER)
return img
def cropper(img, l,t,r,b):
img.crop(int(l),int(t),int(r),int(b))
return img
t = make_transformer(b, resizer, cropper)
i = t(i)
i.save(filename="flower-boxmath.jpg")
def main():
chained()
mb()
if __name__ == '__main__':
main()
| ericmoritz/boxmath | demo.py | Python | bsd-2-clause | 829 | ["Gaussian"] | 4fc93fa87faacdde111139873b38f49ba94a8bf1565571d453bad054da84e0a4 |
import numpy as np
import tensorflow as tf
import random
import matplotlib.pyplot as plt
from gm_generate import *
class ToyExample1(object):
"""
Class implementing variational inference for 1D gaussian data
"""
def __init__(self, mu_p=0.0, sigma_p=1.0, alpha_p=3.5, beta_p=0.5, learning_rate=1e-4):
"""
__init__(ToyExample1, float, float, float, float, float) -> None
mu_p: Hyperparamer for p(mu)
sigma_p: Hyperparameter for p(mu)
alpha_p: Hyperparameter for p(sigma)
beta_p: Hyperparameter for p(sigma)
learning_rate: Learning rate for optimizer
"""
self.x = tf.placeholder(tf.float32, (None,))
self.mu_p = mu_p
self.sigma_p = sigma_p
self.alpha_p = alpha_p
self.beta_p = beta_p
self.learning_rate = learning_rate
# Initialize the variables
self.sigma = tf.Variable(random.random())
self.mu = tf.Variable(random.random())
self.alpha = tf.Variable(1.0+random.random())
self.beta = tf.Variable(1.0+random.random())
N = tf.cast(tf.shape(self.x), tf.float32)
# Calculate ELBO (without constant term)
gamma_ratio = tf.exp(tf.lgamma(self.alpha**2 - 1))
gamma_ratio /= (tf.exp(tf.lgamma(self.alpha**2))*self.beta**2)
pi = tf.constant(np.pi)
e = tf.constant(np.e)
self.ELBO = -(N/2.0)*(tf.digamma(self.alpha**2) + tf.log(self.beta**2)) + \
gamma_ratio*(self.mu*tf.reduce_sum(self.x) - \
(N/2.0)*(self.mu**2 + self.sigma**2) - \
0.5*tf.reduce_sum(self.x ** 2)) + \
(self.alpha_p - 1) * (tf.digamma(self.alpha**2) + tf.log(self.beta**2)) - \
(self.alpha**2*self.beta**2)/self.beta_p + \
self.alpha**2 + tf.log(self.beta**2) + tf.lgamma(self.alpha**2) + \
(1-self.alpha**2)*tf.digamma(self.alpha**2) + \
0.5 * tf.log(2.0 * self.sigma**2 * pi * e)
self.ELBO = -self.ELBO
# Maximize ELBO
self.train = tf.train.AdamOptimizer(self.learning_rate).minimize(self.ELBO)
# Set up the session
self.sess = tf.InteractiveSession()
self.sess.run(tf.global_variables_initializer())
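    # A note on the parameterization above (a reading of the code, not from
    # the original docs): the factorized posteriors are q(mu) = N(m, s^2)
    # with m = self.mu and s = self.sigma, plus a Gamma factor with
    # parameters a = self.alpha**2 and b = self.beta**2 over the noise
    # term; squaring the raw variables keeps the Gamma parameters positive
    # under the unconstrained Adam updates.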
def train_step(self, x):
"""
train_step(ToyExample1, ndarray) -> (float, float, float, float, float)
x: Input data, vector of real numbers
Returns:
cost: -ELBO after a step of optimization
mu, sigma, alpha, beta
"""
_, cost = self.sess.run([self.train, self.ELBO], feed_dict={self.x: x})
mu, sigma, alpha, beta = self.sess.run([self.mu, self.sigma, self.alpha, self.beta])
return (-cost, mu, sigma**2, alpha**2, beta**2)
def run_toy_example_1(num_examples=100, data_mu=0.0, data_sigma=1.0, mu_p=0.0, sigma_p=1.0, alpha_p=3.5, beta_p=0.5, learning_rate=1e-4, num_iter=1000):
"""
run_toy_example_1(int, float, float, float, float, float, float, float, float) -> None
Runs the demo shown in the notebook for different settings of hyperparameters.
"""
# Generate the data
pz = [1]
mu = [np.asarray([data_mu])]
sigma = [np.asarray([data_sigma]).reshape((1, 1))]
gmm = GMM(pz=pz, muks=mu, sigmaks=sigma, k=1, d=1)
x, _ = gmm.generate_points(n=num_examples)
x = x.reshape((-1,))
# Compute variational inference estimate for the parameters
costs = []
toy_example = ToyExample1(mu_p, sigma_p, alpha_p, beta_p, learning_rate)
for i in range(num_iter):
cost, mu_ex, sigma_ex, alpha, beta = toy_example.train_step(x)
costs.append(cost)
# Compute mu_expected, sigma_expected
mu_expected = mu_ex # Expected value of mu using q_1 (normal)
sigma_expected = beta*alpha # Expected value of sigma using q_2 (gamma)
# Print results
print 'Optimal m:', mu_ex
print 'Optimal s^2:', sigma_ex
print 'Optimal alpha:', alpha
print 'Optimal beta:', beta
print 'Expected Value for mu:', mu_expected
    print 'Expected Value for sigma^2:', sigma_expected
# Plot cost vs iterations
plt.plot(costs)
plt.title('Iteration vs ELBO')
plt.xlabel('Iterations')
plt.ylabel('ELBO')
plt.show()
# Show the histogram, true distribution and estimated distribution
plt.hist(x, normed=True, color='#cccccc')
def true_dist(x):
return (1.0/np.sqrt(2*np.pi*sigma[0])*np.exp(-0.5*((x-mu[0])/sigma[0])**2))
def estimated_dist(x):
return (1.0/np.sqrt(2*np.pi*sigma_expected)*np.exp(-0.5*((x-mu_expected)/sigma_expected)**2))
x_axis = np.arange(np.min(x)-0.5, np.max(x)+0.5, 0.01)
plt.plot(x_axis, true_dist(x_axis).reshape((-1,)), 'ro', label='True Distribution')
plt.plot(x_axis, estimated_dist(x_axis).reshape((-1,)), 'co', label='Estimated Distribution')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
"""
if __name__ == '__main__':
ex = ToyExample1(learning_rate=0.05)
for i in range(1000):
cost, mu, sigma, alpha, beta = ex.train_step(np.asarray([1.0, 2.0, 3.0]))
print (cost, mu, sigma, alpha, beta)
"""
| sh-gupta/VariationalInference | toy_example_1.py | Python | mit | 4,681 | ["Gaussian"] | 28a83cee7dfa3ca5beded2872988b9d0623ea60d9627916dc73ab879548947ec |
#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# Stuff for handling Servers
#
# system modules
import time
import string
from spacewalk.common import rhnException, rhnFault, log_debug, log_error, \
CFG, rhnFlags
from spacewalk.common.rhnTranslate import _
from spacewalk.server import rhnChannel, rhnUser, rhnSQL, rhnLib, rhnAction, \
rhnVirtualization
from search_notify import SearchNotify
# Local Modules
import server_kickstart
import server_lib
import server_token
from server_certificate import Certificate, gen_secret
from server_wrapper import ServerWrapper
class Server(ServerWrapper):
""" Main Server class """
def __init__(self, user, arch = None, org_id = None):
ServerWrapper.__init__(self)
self.user = user
# Use the handy TableRow
self.server = rhnSQL.Row("rhnServer", "id")
self.server["release"] = ""
self.server["os"] = "Red Hat Linux"
self.is_rpm_managed = 0
self.set_arch(arch)
# We only get this passed in when we create a new
# entry. Usually a reload will create a dummy entry first and
# then call self.loadcert()
if user:
self.server["org_id"] = user.customer["id"]
elif org_id:
self.server["org_id"] = org_id
self.cert = None
# Also, at this point we know that this is a real server
self.type = "REAL"
self.default_description()
# Satellite certificate associated to this server
self.satellite_cert = None
# custom info values
self.custom_info = None
# uuid
self.uuid = None
self.registration_number = None
_query_lookup_arch = rhnSQL.Statement("""
select sa.id,
case when at.label = 'rpm' then 1 else 0 end is_rpm_managed
from rhnServerArch sa,
rhnArchType at
where sa.label = :archname
and sa.arch_type_id = at.id
""")
def set_arch(self, arch):
self.archname = arch
# try to detect the archid
if arch is None:
return
arch = rhnLib.normalize_server_arch(arch)
h = rhnSQL.prepare(self._query_lookup_arch)
h.execute(archname = arch)
data = h.fetchone_dict()
if not data:
# Log it to disk, it may show interesting things
log_error("Attempt to create server with invalid arch `%s'" %
arch)
raise rhnFault(24,
_("Architecture `%s' is not supported") % arch)
self.server["server_arch_id"] = data["id"]
self.is_rpm_managed = data['is_rpm_managed']
# set the default description...
def default_description(self):
self.server["description"] = "Initial Registration Parameters:\n"\
"OS: %s\n"\
"Release: %s\n"\
"CPU Arch: %s" % (
self.server["os"], self.server["release"],
self.archname)
def __repr__(self):
# misa: looks like id can return negative numbers, so use %d
# instead of %x
# For the gory details,
# http://mail.python.org/pipermail/python-dev/2005-February/051559.html
return "<Server Class at %d: %s>\n" % (
id(self), {
"self.cert" : self.cert,
"self.server" : self.server.data,
})
__str__ = __repr__
# Return a Digital Certificate that can be placed in a file on the
# client side.
def system_id(self):
log_debug(3, self.server, self.cert)
if self.cert is None:
# need to instantiate it
cert = Certificate()
cert["system_id"] = self.server["digital_server_id"]
cert["os_release"] = self.server["release"]
cert["operating_system"] = self.server["os"]
cert["architecture"] = self.archname
cert["profile_name"] = self.server["name"]
cert["description"] = self.server["description"]
if self.user:
cert["username"] = self.user.contact["login"]
cert["type"] = self.type
cert.set_secret(self.server["secret"])
self.cert = cert
return self.cert.certificate()
# return the id of this system
def getid(self):
if not self.server.has_key("id"):
sysid = rhnSQL.Sequence("rhn_server_id_seq")()
self.server["digital_server_id"] = "ID-%09d" % sysid
# we can't reset the id column, so we need to poke into
# internals. kind of illegal, but it works...
self.server.data["id"] = (sysid, 0)
else:
sysid = self.server["id"]
return sysid
# change the base channel of a server
def change_base_channel(self, new_rel):
log_debug(3, self.server["id"], new_rel)
old_rel = self.server["release"]
# test noops
if old_rel == new_rel:
return 1
current_channels = rhnChannel.channels_for_server(self.server["id"])
        # Extract the base channel out of the current channel list
old_base = filter(lambda x: not x['parent_channel'],
current_channels)
# Quick sanity check
base_channels_count = len(old_base)
if base_channels_count == 1:
old_base = old_base[0]
elif base_channels_count == 0:
old_base = None
else:
raise rhnException("Server %s subscribed to multiple base channels"
% (self.server["id"], ))
#bz 442355
#Leave custom base channels alone, don't alter any of the channel subscriptions
if not CFG.RESET_BASE_CHANNEL and rhnChannel.isCustomChannel(old_base["id"]):
log_debug(3,
"Custom base channel detected, will not alter channel subscriptions")
self.server["release"] = new_rel
self.server.save()
msg = """The Red Hat Network Update Agent has detected a
change in the base version of the operating system running
            on your system; additionally, you are subscribed to a custom
channel as your base channel. Due to this configuration
your channel subscriptions will not be altered.
"""
self.add_history("Updated system release from %s to %s" % (
old_rel, new_rel), msg)
self.save_history_byid(self.server["id"])
return 1
s = rhnChannel.LiteServer().init_from_server(self)
s.release = new_rel
s.arch = self.archname
# Let get_server_channels deal with the errors and raise rhnFault
target_channels = rhnChannel.guess_channels_for_server(s)
target_base = filter(lambda x: not x['parent_channel'],
target_channels)[0]
channels_to_subscribe = []
channels_to_unsubscribe = []
if old_base and old_base['id'] == target_base['id']:
# Same base channel. Preserve the currently subscribed child
# channels, just add the ones that are missing
hash = {}
for c in current_channels:
hash[c['id']] = c
for c in target_channels:
channel_id = c['id']
if hash.has_key(channel_id):
# Already subscribed to this one
del hash[channel_id]
continue
# Have to subscribe to this one
channels_to_subscribe.append(c)
# We don't want to lose subscriptions to prior channels, so don't
# do anything with hash.values()
else:
# Different base channel
channels_to_unsubscribe = current_channels
channels_to_subscribe = target_channels
rhnSQL.transaction("change_base_channel")
self.server["release"] = new_rel
self.server.save()
if not (channels_to_subscribe or channels_to_unsubscribe):
# Nothing to do, just add the history entry
self.add_history("Updated system release from %s to %s" % (
old_rel, new_rel))
self.save_history_byid(self.server["id"])
return 1
# XXX: need a way to preserve existing subscriptions to
# families so we can restore access to non-public ones.
rhnChannel.unsubscribe_channels(self.server["id"],
channels_to_unsubscribe)
rhnChannel.subscribe_channels(self.server["id"],
channels_to_subscribe)
# now that we changed, recompute the errata cache for this one
rhnSQL.Procedure("queue_server")(self.server["id"])
# Make a history note
sub_channels = rhnChannel.channels_for_server(self.server["id"])
if sub_channels:
channel_list = map(lambda a: a["name"], sub_channels)
msg = """The Red Hat Network Update Agent has detected a
change in the base version of the operating system running
on your system and has updated your channel subscriptions
to reflect that.
Your server has been automatically subscribed to the following
channels:\n%s\n""" % (string.join(channel_list, "\n"),)
else:
msg = """*** ERROR: ***
While trying to subscribe this server to software channels:
There are no channels serving release %s""" % new_rel
self.add_history("Updated system release from %s to %s" % (
old_rel, new_rel), msg)
self.save_history_byid(self.server["id"])
return 1
def take_snapshot(self, reason):
return server_lib.snapshot_server(self.server['id'], reason)
# returns true iff the base channel assigned to this system
# has been end-of-life'd
def base_channel_is_eol(self):
h = rhnSQL.prepare("""
select 1
from rhnChannel c, rhnServerChannel sc
where sc.server_id = :server_id
and sc.channel_id = c.id
and c.parent_channel IS NULL
and sysdate - c.end_of_life > 0
""")
h.execute(server_id = self.getid())
ret = h.fetchone_dict()
if ret:
return 1
return None
_query_server_custom_info = rhnSQL.Statement("""
select cdk.label,
scdv.value
from rhnCustomDataKey cdk,
rhnServerCustomDataValue scdv
where scdv.server_id = :server_id
and scdv.key_id = cdk.id
""")
def load_custom_info(self):
self.custom_info = {}
h = rhnSQL.prepare(self._query_server_custom_info)
h.execute(server_id = self.getid())
rows = h.fetchall_dict()
if not rows:
log_debug(4, "no custom info values")
return
for row in rows:
self.custom_info[row['label']] = row['value']
# load additional server information from the token definition
def load_token(self):
# Fetch token
tokens_obj = rhnFlags.get("registration_token")
if not tokens_obj:
# No tokens present
return 0
# make sure we have reserved a server_id. most likely if this
# is a new server object (just created from
        # registration.new_system) then we have not associated a
# server["id"] yet -- and getid() will reserve that for us.
self.getid()
# pull in the extra information needed to fill in the
# required registration fields using tokens
user_id = tokens_obj.get_user_id()
org_id = tokens_obj.get_org_id()
self.user = rhnUser.User("", "")
if user_id is not None:
self.user.reload(user_id)
self.server["creator_id"] = user_id
self.server["org_id"] = org_id
return 0
# perform the actions required by the token (subscribing to
# channels, server groups, etc)
def use_token(self):
# Fetch token
tokens_obj = rhnFlags.get("registration_token")
if not tokens_obj:
# No token present
return 0
is_rereg_token = tokens_obj.is_rereg_token
# We get back a history of what is being done in the
# registration process
history = server_token.process_token(self.server, self.archname,
tokens_obj, self.virt_type)
if is_rereg_token:
event_name = "Reactivation via Token"
event_text = "System reactivated"
else:
event_name = "Subscription via Token"
event_text = "System created"
token_name = tokens_obj.get_names()
# now record that history nicely
self.add_history(event_name,
"%s with token <strong>%s</strong><br />\n%s" %
(event_text, token_name, history))
self.save_history_byid(self.server["id"])
        #6/23/05 wregglej 157262, use get_kickstart_session_id() to see if we're in the middle of a kickstart.
ks_id = tokens_obj.get_kickstart_session_id()
#4/5/05 wregglej, Added for bugzilla: 149932. Actions need to be flushed on reregistration.
#6/23/05 wregglej 157262, don't call flush_actions() if we're in the middle of a kickstart.
# It would cause all of the remaining kickstart actions to get flushed, which is bad.
if is_rereg_token and ks_id is None:
self.flush_actions()
# XXX: will need to call self.save() later to commit all that
return 0
def disable_token(self):
tokens_obj = rhnFlags.get('registration_token')
if not tokens_obj:
# Nothing to do
return
if not tokens_obj.is_rereg_token:
# Not a re-registration token - nothing to do
return
# Re-registration token - we know for sure there is only one
token_server_id = tokens_obj.get_server_id()
if token_server_id != self.getid():
# Token is not associated with this server (it may actually not be
# associated with any server)
return
server_token.disable_token(tokens_obj)
# save() will commit this
# Auto-entitlement: attempt to entitle this server to the highest
# entitlement that is available
def autoentitle(self):
# misa: as of 2005-05-27 nonlinux does not get a special treatment
# anymore (this is in connection to feature 145440 - entitlement model
        # changes)
entitlement_hierarchy = ['enterprise_entitled', 'sw_mgr_entitled']
any_base_entitlements = 0
for entitlement in entitlement_hierarchy:
try:
self._entitle(entitlement)
any_base_entitlements = 1
except rhnSQL.SQLSchemaError, e:
if e.errno == 20220:
# ORA-20220: (servergroup_max_members) - Server group
                    # membership cannot exceed maximum membership
#
# ignore for now, since any_base_entitlements will throw
# an error at the end if not set
continue
if e.errno == 20287:
# ORA-20287: (invalid_entitlement) - The server can not be
# entitled to the specified level
#
# ignore for now, since any_base_entitlements will throw
# an error at the end if not set
continue
# Should not normally happen
log_error("Failed to entitle", self.server["id"], entitlement,
e.errmsg)
raise server_lib.rhnSystemEntitlementException("Unable to entitle")
except rhnSQL.SQLError, e:
log_error("Failed to entitle", self.server["id"], entitlement,
str(e))
raise server_lib.rhnSystemEntitlementException("Unable to entitle")
else:
if any_base_entitlements:
# All is fine
return
else:
raise server_lib.rhnNoSystemEntitlementsException
def _entitle(self, entitlement):
entitle_server = rhnSQL.Procedure("rhn_entitlements.entitle_server")
entitle_server(self.server['id'], entitlement)
def create_perm_cache(self):
log_debug(4)
create_perms = rhnSQL.Procedure("rhn_cache.update_perms_for_server")
create_perms(self.server['id'])
def gen_secret(self):
# Running this invalidates the cert
self.cert = None
self.server["secret"] = gen_secret()
_query_update_uuid = rhnSQL.Statement("""
update rhnServerUuid set uuid = :uuid
where server_id = :server_id
""")
_query_insert_uuid = rhnSQL.Statement("""
insert into rhnServerUuid (server_id, uuid)
values (:server_id, :uuid)
""")
def update_uuid(self, uuid, commit=1):
log_debug(3, uuid)
        # XXX Should determine a way to do this dynamically
uuid_col_length = 36
if uuid is not None:
uuid = str(uuid)
if not uuid:
log_debug('Nothing to do')
return
uuid = uuid[:uuid_col_length]
server_id = self.server['id']
log_debug(4, "Trimmed uuid", uuid, server_id)
# Update this server's UUID (unique client identifier)
h = rhnSQL.prepare(self._query_update_uuid)
ret = h.execute(server_id=server_id, uuid=uuid)
log_debug(4, "execute returned", ret)
if ret != 1:
# Row does not exist, have to create it
h = rhnSQL.prepare(self._query_insert_uuid)
h.execute(server_id=server_id, uuid=uuid)
if commit:
rhnSQL.commit()
# Save this record in the database
def __save(self, channel):
if self.server.real:
server_id = self.server["id"]
self.server.save()
else: # create new entry
self.gen_secret()
server_id = self.getid()
org_id = self.server["org_id"]
if self.user:
user_id = self.user.getid()
else:
user_id = None
# some more default values
self.server["auto_deliver"] = "N"
self.server["auto_update"] = "N"
if self.user and not self.server.has_key("creator_id"):
# save the link to the user that created it if we have
# that information
self.server["creator_id"] = self.user.getid()
# and create the server entry
self.server.create(server_id)
server_lib.create_server_setup(server_id, org_id)
have_reg_token = rhnFlags.test("registration_token")
# Handle virtualization specific bits
if self.virt_uuid is not None and \
self.virt_type is not None:
rhnVirtualization._notify_guest(self.getid(),
self.virt_uuid, self.virt_type)
# if we're using a token, then the following channel
# subscription request can allow no matches since the
# token code will fix up or fail miserably later.
# subscribe the server to applicable channels
# bretm 02/17/2007 -- TODO: refactor activation key codepaths
# to allow us to not have to pass in none_ok=1 in any case
#
# This can now throw exceptions which will be caught at a higher level
if channel is not None:
channel_info = dict(rhnChannel.channel_info(channel))
log_debug(4, "eus channel id %s" % str(channel_info))
rhnChannel._subscribe_sql(server_id, channel_info['id'])
else:
rhnChannel.subscribe_server_channels(self,
none_ok=have_reg_token,
user_id=user_id)
if not have_reg_token:
# Attempt to auto-entitle, can throw the following exceptions:
# rhnSystemEntitlementException
# rhnNoSystemEntitlementsException
self.autoentitle()
        # If this is a new server that was registered by a user (i.e. not
        # with a registration token), look for this user's default
        # groups
self.join_groups()
server_lib.join_rhn(org_id)
# Update the uuid - but don't commit yet
self.update_uuid(self.uuid, commit=0)
self.create_perm_cache()
# And save the extra profile data...
self.save_packages_byid(server_id, schedule=1)
self.save_hardware_byid(server_id)
self.save_history_byid(server_id)
return 0
    # This is a wrapper around the __save() method above that allows us to
    # roll back any changes in case we don't succeed completely
def save(self, commit = 1, channel = None):
log_debug(3)
# attempt to preserve pending changes before we were called,
# so we set up our own transaction checkpoint
rhnSQL.transaction("save_server")
try:
self.__save(channel)
except: # roll back to what we have before and raise again
rhnSQL.rollback("save_server")
# shoot the exception up the chain
raise
else: # if we want to commit, commit all pending changes
if commit:
rhnSQL.commit()
try:
search = SearchNotify()
search.notify()
except Exception, e:
log_error("Exception caught from SearchNotify.notify().", e)
return 0
# Reload the current configuration from database using a server id.
def reload(self, server, reload_all = 0):
log_debug(4, server, "reload_all = %d" % reload_all)
if not self.server.load(int(server)):
log_error("Could not find server record for reload", server)
raise rhnFault(29, "Could not find server record in the database")
self.cert = None
# it is lame that we have to do this
h = rhnSQL.prepare("""
select label from rhnServerArch where id = :archid
""")
h.execute(archid = self.server["server_arch_id"])
data = h.fetchone_dict()
if not data:
raise rhnException("Found server with invalid numeric "
"architecture reference",
self.server.data)
self.archname = data['label']
        # we don't know this one anymore (well, we could look it up, but
# why would we do that?)
self.user = None
# XXX: Fix me
if reload_all:
if not self.reload_packages_byid(self.server["id"]) == 0:
return -1
if not self.reload_hardware_byid(self.server["id"]) == 0:
return -1
return 0
# Use the values we find in the cert to cause a reload of this
# server from the database.
def loadcert(self, cert, load_user = 1):
log_debug(4, cert)
# certificate is presumed to be already verified
if not isinstance(cert, Certificate):
return -1
# reload the whole thing based on the cert data
server = cert["system_id"]
row = server_lib.getServerID(server)
if row is None:
return -1
sid = row["id"]
# standard reload based on an ID
ret = self.reload(sid)
if not ret == 0:
return ret
# the reload() will never be able to fill in the username. It
        # would require, from the database standpoint, ensuring that for
# a given server we can have only one owner at any given time.
# cert includes it and it's valid because it has been verified
        # through checksumming before we got here
self.user = None
#Load the user if at all possible. If it's not possible,
#self.user will be None, which should be a handled case wherever
#self.user is used.
if load_user:
# Load up the username associated with this profile
self.user = rhnUser.search(cert["username"])
##4/27/05 wregglej - Commented out this block because it was causing problems
##with rhn_check/up2date when the user that registered the system was deleted.
# if not self.user:
# log_error("Invalid username for server id",
# cert["username"], server, cert["profile_name"])
# raise rhnFault(9, "Invalid username '%s' for server id %s" %(
# cert["username"], server))
# XXX: make sure that the database thinks that the server
        # registrant is the same as this certificate thinks. The
# certificate passed checksum checks, but it never hurts to be
# too careful now with satellites and all.
return 0
# Is this server entitled?
def check_entitlement(self):
if not self.server.has_key("id"):
return None
log_debug(3, self.server["id"])
return server_lib.check_entitlement(self.server['id'])
# Given a dbiDate object, returns the UNIX representation (seconds since
# epoch)
def dbiDate2timestamp(self, dateobj):
timeString = '%s %s %s %s %s %s' % (dateobj.year, dateobj.month,
dateobj.day, dateobj.hour, dateobj.minute, dateobj.second)
return time.mktime(time.strptime(timeString, '%Y %m %d %H %M %S'))
def validateSatCert(self):
# make sure the cert is still valid
h = rhnSQL.prepare("""
select TO_CHAR(expires, 'YYYY-MM-DD HH24:MI:SS') expires
from rhnSatelliteCert
where label = 'rhn-satellite-cert'
order by version desc nulls last
""")
# Fetching just the first row will get the max version, null
# included
h.execute()
ret = h.fetchone_dict()
if not ret:
log_debug(2, "Satellite certificate not found")
return 0
expire_string = ret['expires']
expire_time = time.mktime(time.strptime(expire_string,
"%Y-%m-%d %H:%M:%S"))
now = time.time()
log_debug(3, "Certificate expiration: %s; now: time: %s (%s)" % (
expire_string, time.ctime(now), now))
# We will allow for a grace period of 8 days after the cert expires to
        # give the user some time to renew the certificate before we disable it.
grace_period_seconds = 60 * 60 * 24 * 8
if (now > expire_time + grace_period_seconds):
log_debug(1, "Satellite certificate expired on %s" % expire_string)
return 0
return 1
def checkin(self, commit = 1, check_for_abuse = 1):
""" convenient wrapper for these thing until we clean the code up """
if not self.server.has_key("id"):
return 0 # meaningless if rhnFault not raised
return server_lib.checkin(self.server["id"], commit,
check_for_abuse=check_for_abuse)
def throttle(self):
""" convenient wrapper for these thing until we clean the code up """
if not self.server.has_key("id"):
return 1 # meaningless if rhnFault not raised
return server_lib.throttle(self.server)
def set_qos(self):
""" convenient wrapper for these thing until we clean the code up """
if not self.server.has_key("id"):
return 1 # meaningless if rhnFault not raised
return server_lib.set_qos(self.server["id"])
def join_groups(self):
""" For a new server, join server groups """
# Sanity check - we should always have a user
if not self.user:
raise rhnException("User not specified")
server_id = self.getid()
user_id = self.user.getid()
h = rhnSQL.prepare("""
select system_group_id
from rhnUserDefaultSystemGroups
where user_id = :user_id
""")
h.execute(user_id=user_id)
while 1:
row = h.fetchone_dict()
if not row:
break
server_group_id = row['system_group_id']
log_debug(5, "Subscribing server to group %s" % server_group_id)
server_lib.join_server_group(server_id, server_group_id)
def fetch_registration_message(self):
return rhnChannel.system_reg_message(self)
def process_kickstart_info(self):
log_debug(4)
tokens_obj = rhnFlags.get("registration_token")
if not tokens_obj:
log_debug(4, "no registration token found")
# Nothing to do here
return
# If there are kickstart sessions associated with this system (other
# than, possibly, the current one), mark them as failed
history = server_kickstart.terminate_kickstart_sessions(self.getid())
for k, v in history:
self.add_history(k, v)
kickstart_session_id = tokens_obj.get_kickstart_session_id()
if kickstart_session_id is None:
log_debug(4, "No kickstart_session_id associated with token %s (%s)"
% (tokens_obj.get_names(), tokens_obj.tokens))
# Nothing to do here
return
# Flush server actions
self.flush_actions()
server_id = self.getid()
action_id = server_kickstart.schedule_kickstart_sync(server_id,
kickstart_session_id)
server_kickstart.subscribe_to_tools_channel(server_id,
kickstart_session_id)
server_kickstart.schedule_virt_pkg_install(server_id,
kickstart_session_id)
# Update the next action to the newly inserted one
server_kickstart.update_ks_session_table(kickstart_session_id,
'registered', action_id, server_id)
def flush_actions(self):
server_id = self.getid()
h = rhnSQL.prepare("""
select action_id
from rhnServerAction
where server_id = :server_id
and status in (0, 1) -- Queued or Picked Up
""")
h.execute(server_id=server_id)
while 1:
row = h.fetchone_dict()
if not row:
break
action_id = row['action_id']
rhnAction.update_server_action(server_id=server_id,
action_id=action_id, status=3, result_code=-100,
result_message="Action canceled: system kickstarted or reregistered") #4/6/05 wregglej, added the "or reregistered" part.
def server_locked(self):
""" Returns true is the server is locked (for actions that are blocked) """
server_id = self.getid()
h = rhnSQL.prepare("""
select 1
from rhnServerLock
where server_id = :server_id
""")
h.execute(server_id=server_id)
row = h.fetchone_dict()
if row:
return 1
return 0
def register_push_client(self):
""" insert or update rhnPushClient for this server_id """
server_id = self.getid()
ret = server_lib.update_push_client_registration(server_id)
return ret
def register_push_client_jid(self, jid):
""" update the JID in the corresponing entry from rhnPushClient """
server_id = self.getid()
ret = server_lib.update_push_client_jid(server_id, jid)
return ret
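# Editorial addition (hedged, not from the Spacewalk sources): the
# registration-time call sequence implied by the methods above looks
# roughly like the sketch below. The method names are real, but the
# surrounding setup (an initialized rhnSQL connection and a loaded
# rhnUser) is assumed, and the arch string is only an example.
#
#     s = Server(user, arch='i686-redhat-linux')  # set_arch() + org wiring
#     s.server['release'] = '9'
#     s.getid()         # reserves an id and a digital_server_id
#     s.save(commit=1)  # __save(): create row, subscribe channels, entitle
#     client_cert = s.system_id()  # certificate handed back to the client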
|
colloquium/spacewalk
|
backend/server/rhnServer/server_class.py
|
Python
|
gpl-2.0
| 32,634
|
[
"CDK"
] |
647bf0caa82ec1e8e907e4e918021cc9833946f90d2172dfe6f08ed96a950949
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import sys
import BioClasses
import cPickle
import pysam
import scipy
def main():
# load data from the GTF pic
with open( "/home/paul/Resources/H_sapiens/test.hg19.chr.pic" ) as f:
genes = cPickle.load( f )
# iterate over a few Transcript objects
c = 0
for gene_id,G in genes.iteritems():
for transcript_id,T in G.transcripts.iteritems():
if c > 100:
break
print T,T.strand
# iterate through the exons
exons = T.exons.values()
exons.sort()
i = 0
for exon_id,E in T.exons.iteritems():
print exon_id,E,exons[i]
i += 1
c += 1
if __name__ == "__main__":
main()
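# Editorial note (hedged): exons.sort() above relies on BioClasses.Exon
# defining its own ordering (__cmp__ or rich comparisons). If that ever
# changes, an explicit keyed sort is sturdier, e.g. by genomic start:
#
#     ordered = sorted(T.exons.values(), key=lambda E: E.start)
#
# The attribute name `start` is a guess about the BioClasses API.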
|
polarise/python-bioclasses
|
test/test_Exon.py
|
Python
|
gpl-2.0
| 664
|
[
"pysam"
] |
d8c8b754584b68a6c8d44362c09d989727de7823c7ff2d856ce89756432f5c81
|
# Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from oslo_utils.fixture import uuidsentinel
from pypowervm import const as pvm_const
from pypowervm.tasks import scsi_mapper as tsk_map
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.utils import transaction as pvm_tx
from pypowervm.wrappers import network as pvm_net
from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova import test
from nova.virt.powervm import media as m
class TestConfigDrivePowerVM(test.NoDBTestCase):
"""Unit Tests for the ConfigDrivePowerVM class."""
def setUp(self):
super(TestConfigDrivePowerVM, self).setUp()
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
self.validate_vopt = self.useFixture(fixtures.MockPatch(
'pypowervm.tasks.vopt.validate_vopt_repo_exists',
autospec=True)).mock
self.validate_vopt.return_value = 'vios_uuid', 'vg_uuid'
@mock.patch('nova.api.metadata.base.InstanceMetadata')
@mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
def test_crt_cfg_dr_iso(self, mock_mkdrv, mock_meta):
"""Validates that the image creation method works."""
cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
self.assertTrue(self.validate_vopt.called)
mock_instance = mock.MagicMock()
mock_instance.uuid = uuidsentinel.inst_id
mock_files = mock.MagicMock()
mock_net = mock.MagicMock()
iso_path = '/tmp/cfgdrv.iso'
cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
iso_path)
self.assertEqual(mock_mkdrv.call_count, 1)
# Test retry iso create
mock_mkdrv.reset_mock()
mock_mkdrv.side_effect = [OSError, mock_mkdrv]
cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
iso_path)
self.assertEqual(mock_mkdrv.call_count, 2)
@mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
@mock.patch('pypowervm.tasks.scsi_mapper.add_map')
@mock.patch('os.path.getsize')
@mock.patch('pypowervm.tasks.storage.upload_vopt')
@mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM.'
'_create_cfg_dr_iso')
def test_create_cfg_drv_vopt(self, mock_ccdi, mock_upl, mock_getsize,
mock_addmap, mock_bldmap, mock_vm_id,
mock_ntf):
cfg_dr = m.ConfigDrivePowerVM(self.apt)
mock_instance = mock.MagicMock()
mock_instance.uuid = uuidsentinel.inst_id
mock_upl.return_value = 'vopt', 'f_uuid'
fh = mock_ntf.return_value.__enter__.return_value
fh.name = 'iso_path'
wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
def test_afs(add_func):
# Validate the internal add_func
vio = mock.create_autospec(pvm_vios.VIOS)
self.assertEqual(mock_addmap.return_value, add_func(vio))
mock_vm_id.assert_called_once_with(mock_instance)
mock_bldmap.assert_called_once_with(
None, vio, mock_vm_id.return_value, 'vopt')
mock_addmap.assert_called_once_with(vio, mock_bldmap.return_value)
wtsk.add_functor_subtask.side_effect = test_afs
# calculate expected file name
expected_file_name = 'cfg_' + mock_instance.uuid.replace('-', '')
allowed_len = pvm_const.MaxLen.VOPT_NAME - 4 # '.iso' is 4 chars
expected_file_name = expected_file_name[:allowed_len] + '.iso'
cfg_dr.create_cfg_drv_vopt(
mock_instance, 'files', 'netinfo', ftsk, admin_pass='pass')
mock_ntf.assert_called_once_with(mode='rb')
mock_ccdi.assert_called_once_with(mock_instance, 'files', 'netinfo',
'iso_path', admin_pass='pass')
mock_getsize.assert_called_once_with('iso_path')
mock_upl.assert_called_once_with(self.apt, 'vios_uuid', fh,
expected_file_name,
mock_getsize.return_value)
wtsk.add_functor_subtask.assert_called_once()
def test_sanitize_network_info(self):
network_info = [{'type': 'lbr'}, {'type': 'pvm_sea'},
{'type': 'ovs'}]
cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
resp = cfg_dr_builder._sanitize_network_info(network_info)
expected_ret = [{'type': 'vif'}, {'type': 'vif'},
{'type': 'ovs'}]
self.assertEqual(resp, expected_ret)
@mock.patch('pypowervm.wrappers.storage.VG', autospec=True)
@mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
@mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
@mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
@mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
@mock.patch('taskflow.task.FunctorTask', autospec=True)
def test_dlt_vopt(self, mock_functask, mock_vios, mock_find_maps, mock_gmf,
mock_uuid, mock_rmstg, mock_vg):
cfg_dr = m.ConfigDrivePowerVM(self.apt)
wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
# Test with no media to remove
mock_find_maps.return_value = []
cfg_dr.dlt_vopt('inst', ftsk)
mock_uuid.assert_called_once_with('inst')
mock_gmf.assert_called_once_with(pvm_stg.VOptMedia)
wtsk.add_functor_subtask.assert_called_once_with(
tsk_map.remove_maps, mock_uuid.return_value,
match_func=mock_gmf.return_value)
ftsk.get_wrapper.assert_called_once_with('vios_uuid')
mock_find_maps.assert_called_once_with(
ftsk.get_wrapper.return_value.scsi_mappings,
client_lpar_id=mock_uuid.return_value,
match_func=mock_gmf.return_value)
mock_functask.assert_not_called()
# Test with media to remove
mock_find_maps.return_value = [mock.Mock(backing_storage=media)
for media in ['m1', 'm2']]
def test_functor_task(rm_vopt):
# Validate internal rm_vopt function
rm_vopt()
mock_vg.get.assert_called_once_with(
self.apt, uuid='vg_uuid', parent_type=pvm_vios.VIOS,
parent_uuid='vios_uuid')
mock_rmstg.assert_called_once_with(
mock_vg.get.return_value, vopts=['m1', 'm2'])
return 'functor_task'
mock_functask.side_effect = test_functor_task
cfg_dr.dlt_vopt('inst', ftsk)
mock_functask.assert_called_once()
ftsk.add_post_execute.assert_called_once_with('functor_task')
def test_mgmt_cna_to_vif(self):
mock_cna = mock.Mock(spec=pvm_net.CNA, mac="FAD4433ED120")
# Run
cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
vif = cfg_dr_builder._mgmt_cna_to_vif(mock_cna)
# Validate
self.assertEqual(vif.get('address'), "fa:d4:43:3e:d1:20")
self.assertEqual(vif.get('id'), 'mgmt_vif')
self.assertIsNotNone(vif.get('network'))
self.assertEqual(1, len(vif.get('network').get('subnets')))
subnet = vif.get('network').get('subnets')[0]
self.assertEqual(6, subnet.get('version'))
self.assertEqual('fe80::/64', subnet.get('cidr'))
ip = subnet.get('ips')[0]
self.assertEqual('fe80::f8d4:43ff:fe3e:d120', ip.get('address'))
def test_mac_to_link_local(self):
mac = 'fa:d4:43:3e:d1:20'
self.assertEqual('fe80::f8d4:43ff:fe3e:d120',
m.ConfigDrivePowerVM._mac_to_link_local(mac))
mac = '00:00:00:00:00:00'
self.assertEqual('fe80::0200:00ff:fe00:0000',
m.ConfigDrivePowerVM._mac_to_link_local(mac))
mac = 'ff:ff:ff:ff:ff:ff'
self.assertEqual('fe80::fdff:ffff:feff:ffff',
m.ConfigDrivePowerVM._mac_to_link_local(mac))
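# Editorial sketch (hedged, not the nova implementation): the modified
# EUI-64 mapping that _mac_to_link_local is expected to follow per
# RFC 4291 -- flip the universal/local bit of the first octet, splice
# 0xff, 0xfe between the two MAC halves, and prefix fe80::. This sketch
# reproduces the three values asserted in test_mac_to_link_local above.
def _mac_to_link_local_sketch(mac):
    octets = [int(part, 16) for part in mac.split(':')]
    octets[0] ^= 0x02  # flip the universal/local (U/L) bit
    eui64 = octets[:3] + [0xff, 0xfe] + octets[3:]
    groups = ['%02x%02x' % (eui64[i], eui64[i + 1]) for i in range(0, 8, 2)]
    return 'fe80::' + ':'.join(groups)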
|
klmitch/nova
|
nova/tests/unit/virt/powervm/test_media.py
|
Python
|
apache-2.0
| 9,125
|
[
"FEFF"
] |
646138093def824b82764dd42a8d9d7f55b5ecce3c515dc9f27e924b843f8568
|