Dataset schema (one record per source file):

    text        string   length 12 to 1.05M
    repo_name   string   length 5 to 86
    path        string   length 4 to 191
    language    string   1 class
    license     string   15 classes
    size        int32    12 to 1.05M
    keyword     list     length 1 to 23
    text_hash   string   length 64 to 64
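Each record below carries the raw file contents in text, repository coordinates in repo_name and path, and the matched search terms in keyword. As a rough illustration of how a dump with this schema can be consumed, here is a minimal sketch that reads records from a JSON-lines export and filters them by keyword; the file name "code_records.jsonl" and the helper names are assumptions for illustration, not part of the dataset.

# Minimal sketch (assumed layout): iterate over records shaped like the schema
# above, stored one JSON object per line, and pick out rows by keyword.
# "code_records.jsonl" is a hypothetical file name, not part of the dataset.
import json


def iter_records(path="code_records.jsonl"):
    """Yield one dict per record: text, repo_name, path, language,
    license, size, keyword, text_hash."""
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)


def records_with_keyword(records, term):
    """Keep only records whose keyword list contains the given term."""
    return [r for r in records if term in r.get("keyword", [])]


if __name__ == "__main__":
    hits = records_with_keyword(iter_records(), "RDKit")
    for rec in hits:
        print(rec["repo_name"], rec["path"], rec["size"])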
# Copyright (c) 2002 Myers "icepick" Carpenter
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.

__revision__ = "$Id: __init__.py,v 1.5 2002/12/02 19:58:55 myers_carpenter Exp $"

from egtp.mencode.mencode import *
repo_name: zooko/egtp_new
path: egtp/mencode/__init__.py
language: Python
license: lgpl-2.1
size: 315
keyword: ["VisIt"]
text_hash: 194160eb94cc570ef2534a89f9fa641acb7d08ae9a8d03b6bd4bd90e84ff3f7a
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Functional tests for a :ref:`weblayer` application using the :ref:`request handler api`. """ import unittest class TestBasics(unittest.TestCase): """ Sanity check the basics of hooking up a request handler and returning a simple response. """ def make_app(self, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'check_xsrf': False, 'cookie_secret': '...', 'static_files_path': 'static', 'template_directories': ['templates'] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_hello_world(self): """ Can we hook up a request handler with a positional argument from the request path and return a simple response? """ from weblayer import RequestHandler class Handler(RequestHandler): def get(self, world): return u'hello %s' % world mapping = [(r'/(.*)', Handler)] app = self.make_app(mapping) res = app.get('/world') self.assertTrue(res.body == 'hello world') def test_unicode_response(self): """ Can we return unicode characters? """ from weblayer import RequestHandler class Handler(RequestHandler): def get(self): return u'hello Ð' mapping = [(r'/', Handler)] app = self.make_app(mapping) res = app.get('/') self.assertTrue(res.unicode_body == u'hello Ð') def test_mapping(self): """ Can we hook up multiple handlers? """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return 'a' class B(RequestHandler): def get(self): return 'b' mapping = [( r'/a', A ), ( r'/.*', B ) ] app = self.make_app(mapping) res = app.get('/a') self.assertTrue(res.body == 'a') res = app.get('/foobar') self.assertTrue(res.body == 'b') def test_groups(self): """ Each group in the request path should be passed as to the handler method as a positional argument. """ from weblayer import RequestHandler class Handler(RequestHandler): def get(self, *args): return ''.join(args) mapping = [(r'/(.)/(.)/(.)', Handler)] app = self.make_app(mapping) res = app.get('/a/b/c') self.assertTrue(res.body == 'abc') def test_head_method(self): """ HEAD requests should use ``Hander.get`` iff ``'head'`` is exposed, ``get()`` is defined and ``head()`` isn't. """ from weblayer import RequestHandler class Handler(RequestHandler): def get(self): return 'hello' mapping = [(r'/', Handler)] app = self.make_app(mapping) res = app.head('/') self.assertTrue(res.headers['Content-Length'] == '5') def test_form_post(self): """ POST requests should call ``Hander.post``. """ from weblayer import RequestHandler class Handler(RequestHandler): __all__ = ('get', 'post') def get(self): form = u'<form method="post"><input name="name" /></form>' return u'What is your name? %s' % form def post(self): return u'Hello %s!' % self.request.params.get('name') mapping = [(r'/', Handler)] app = self.make_app(mapping) res = app.get('/') form = res.form form['name'] = 'Brian' res = form.submit() self.assertTrue(res.body == 'Hello Brian!') class TestResponse(unittest.TestCase): """ Sanity check response generation. 
""" def make_app(self, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': '...', 'static_files_path': 'static', 'template_directories': ['templates'] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_return_basestring(self): """ Returning a ``basestring`` from a request handler method should update the ``response.body``. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return 'hello' class B(RequestHandler): def get(self): return u'hellö' mapping = [(r'/a', A), (r'/b', B)] app = self.make_app(mapping) res = app.get('/a') self.assertTrue(res.body == 'hello') res = app.get('/b') self.assertTrue(res.unicode_body == u'hellö') def test_return_none(self): """ Returning ``None`` should fallback on ``self.response``. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): self.response.body = 'elephants' return None mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/') self.assertTrue(res.body == 'elephants') def test_return_response(self): """ Returning an :py:class:`~weblayer.interfaces.IResponse` from a request handler method should overwrite and use ``self.response``. """ from weblayer import RequestHandler from weblayer.base import Response class A(RequestHandler): def get(self): response = Response(body='fandango', request=self.request) response.environ['weblayer.test_return_response'] = 1 return response mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/', extra_environ={'weblayer.test_return_response': 0}) self.assertTrue(res.body == 'fandango') self.assertTrue(res.environ['weblayer.test_return_response']) def test_return_data(self): """ Returning something other than a ``basestring``, ``None`` or :py:class:`~weblayer.interfaces.IResponse`` should JSON encode it. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return {'a': 'b'} class B(RequestHandler): def get(self): return {'a': u'ß'} mapping = [(r'/a', A), (r'/b', B)] app = self.make_app(mapping) res = app.get('/a') self.assertTrue(res.body == '{"a": "b"}') res = app.get('/b') self.assertTrue(res.unicode_body == u'{"a": "ß"}') class TestSettings(unittest.TestCase): """ Sanity check ``self.settings``. """ def make_app(self, config, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_required_settings_misses_raises_error(self): """ You must provide required settings by default. """ from weblayer import RequestHandler config = {} mapping = [(r'/', RequestHandler)] self.assertRaises( KeyError, self.make_app, config, mapping ) def test_settings_available(self): """ Settings are available as self.settings. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return '%s %s' % ( self.settings['cookie_secret'], self.settings.get('not_present', None) ) config = { 'cookie_secret': '...', 'static_files_path': 'static', 'template_directories': ['templates'] } mapping = [(r'/', A)] app = self.make_app(config, mapping) res = app.get('/') self.assertTrue(res.body == '... None') class TestAuth(unittest.TestCase): """ Sanity check ``self.auth``. 
""" def make_app(self, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': '...', 'static_files_path': 'static', 'template_directories': ['templates'] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_unauthenticated(self): """ If there's no ``environ['REMOTE_USER']``, ``self.auth.is_authenticated`` is ``False`` and ``self.auth.current_user`` is ``None``. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return '%s %s' % ( self.auth.is_authenticated, self.auth.current_user ) mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/') self.assertTrue(res.body == 'False None') def test_authenticated(self): """ If there is a ``environ['REMOTE_USER']``, ``self.auth.is_authenticated`` is ``True`` and ``self.auth.current_user`` is ``environ['REMOTE_USER']``. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return '%s %s' % ( self.auth.is_authenticated, self.auth.current_user ) mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/', extra_environ={'REMOTE_USER': '123456'}) self.assertTrue(res.body == 'True 123456') class TestCookies(unittest.TestCase): """ Sanity check ``self.cookies``. """ def make_app(self, cookie_secret, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': cookie_secret, 'static_files_path': 'static', 'template_directories': ['templates'] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_set_and_get(self): """ We can set and get a cookie. """ from weblayer import RequestHandler class SetCookie(RequestHandler): def get(self): self.cookies.set('name', 'value') class GetCookie(RequestHandler): def get(self): return self.cookies.get('name') mapping = [(r'/set', SetCookie), (r'/get', GetCookie)] app = self.make_app('abc', mapping) app.get('/set') res = app.get('/get') self.assertTrue(res.body == 'value') def test_forged_returns_none(self): """ If we set the value of the cookie without using ``self.cookies.set()``, ``self.cookies.get()`` returns None. """ from weblayer import RequestHandler class SetCookie(RequestHandler): def get(self): self.response.set_cookie('name', value='value') class GetCookie(RequestHandler): def get(self): return '%s' % self.cookies.get('name') mapping = [(r'/set', SetCookie), (r'/get', GetCookie)] app = self.make_app('abc', mapping) app.get('/set') res = app.get('/get') self.assertTrue(res.body == 'None') class TestStatic(unittest.TestCase): """ Sanity check ``self.static``. 
""" def setUp(self): """ Make sure ``./static/foo.js`` contains only:: var foo = {}; """ from os.path import dirname, join as join_path file_path = join_path(dirname(__file__), 'static', 'foo.js') sock = open(file_path, 'w') sock.write('var foo = {};') sock.close() def tearDown(self): """ Make sure ``./static/foo.js`` contains only:: var foo = {}; """ from os.path import dirname, join as join_path file_path = join_path(dirname(__file__), 'static', 'foo.js') sock = open(file_path, 'w') sock.write('var foo = {};') sock.close() def make_app(self, mapping, **extra_config): from os.path import dirname, join as join_path from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': '...', 'static_files_path': join_path(dirname(__file__), 'static'), 'template_directories': ['templates'] } config.update(extra_config) bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_no_qs_if_file_doesnt_exist(self): """ If the file doesn't exist, don't add a ``v=...`` part to the query string. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.static.get_url('not_there.js') mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/') self.assertTrue(res.body == 'http://localhost/static/not_there.js') def test_qs_if_file_exists(self): """ If the file does exist, add the first few chars of a hash digest of of the file contents to the query string. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.static.get_url('foo.js') mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/') self.assertTrue(res.body == 'http://localhost/static/foo.js?v=fc075b5') def test_qs_cached_in_memory_despite_file_content_changing(self): """ The hexdigest snippet is cached in memory and doesn't automatically update when the underlying file changes. """ from os.path import dirname, join as join_path from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.static.get_url('foo.js') mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/') self.assertTrue(res.body == 'http://localhost/static/foo.js?v=fc075b5') # change the file file_path = join_path(dirname(__file__), 'static', 'foo.js') sock = open(file_path, 'w') sock.write('var foo = {\'changed\': true};') sock.close() # the qs *hasn't* changed res = app.get('/') self.assertTrue(res.body == 'http://localhost/static/foo.js?v=fc075b5') def test_qs_changed_when_cache_cleared(self): """ If we clear the cache, then the underlying file content change is reflected in the query string. """ from os.path import dirname, join as join_path from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.static.get_url('foo.js') class B(RequestHandler): def get(self): """ n.b.: this hack only clears the cache for this process. Don't do this in real code. 
""" static_files = self.settings['static_files_path'] file_path = join_path(static_files, 'foo.js') del self.static._cache[file_path] mapping = [(r'/a', A), (r'/b', B)] app = self.make_app(mapping) res = app.get('/a') self.assertTrue(res.body == 'http://localhost/static/foo.js?v=fc075b5') # change the file file_path = join_path(dirname(__file__), 'static', 'foo.js') sock = open(file_path, 'w') sock.write('var foo = {\'changed\': true};') sock.close() # clear the cache res = app.get('/b') # the qs *has* changed res = app.get('/a') self.assertTrue(res.body == 'http://localhost/static/foo.js?v=114b07a') # clear the cache res = app.get('/b') def test_host_url(self): """ Uses the host url of the request. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.static.get_url('foo.js') mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/', extra_environ={'HTTP_HOST': 'foo.com:1234'}) self.assertTrue( res.body == 'http://foo.com:1234/static/foo.js?v=fc075b5' ) def test_static_host_url(self): """ Unless ``settings['static_host_url']`` is provided. """ from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.static.get_url('foo.js') mapping = [(r'/', A)] app = self.make_app(mapping, static_host_url='http://static.foo.com') res = app.get('/') self.assertTrue( res.body == 'http://static.foo.com/static/foo.js?v=fc075b5' ) res = app.get('/', extra_environ={'HTTP_HOST': 'foo.com:1234'}) self.assertTrue( res.body == 'http://static.foo.com/static/foo.js?v=fc075b5' ) class TestXSRF(unittest.TestCase): """ Sanity check validating against XSRF attacks. """ def make_app(self, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': '...', 'static_files_path': 'static', 'template_directories': ['templates'] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_form_post_using_xsrf_input(self): """ If ``self.xsrf_input`` is included in the form POST it validates. """ from weblayer import RequestHandler class Handler(RequestHandler): __all__ = ('get', 'post') def get(self): inputs = u'%s<input name="name" />' % self.xsrf_input form = u'<form method="post">%s</form>' % inputs return u'What is your name? %s' % form def post(self): return u'Hello %s!' % self.request.params.get('name') mapping = [(r'/', Handler)] app = self.make_app(mapping) res = app.get('/') form = res.form form['name'] = 'Brian' res = form.submit() self.assertTrue(res.body == 'Hello Brian!') def test_form_post_without_xsrf_input(self): """ If ``self.xsrf_input`` is not included in the form POST it, fails to validate. """ from webtest import AppError from weblayer import RequestHandler class Handler(RequestHandler): __all__ = ('get', 'post') def get(self): form = u'<form method="post"><input name="name" /></form>' return u'What is your name? %s' % form def post(self): """ Never gets called """ mapping = [(r'/', Handler)] app = self.make_app(mapping) res = app.get('/') form = res.form form['name'] = 'Brian' self.assertRaises(AppError, form.submit) try: result = form.submit() except AppError, err: self.assertTrue('403 Forbidden' in str(err)) class TestError(unittest.TestCase): """ Sanity check ``self.error()``. 
""" def make_app(self, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': '...', 'static_files_path': 'static', 'template_directories': ['templates'] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_return_vanilla_error(self): """ Returning ``self.error()`` results in a 500 response. """ from webtest import AppError from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.error() mapping = [(r'/', A)] app = self.make_app(mapping) self.assertRaises(AppError, app.get, '/') try: result = app.get('/') except AppError, err: self.assertTrue('500 Internal Server Error' in str(err)) def test_return_specific_error(self): """ Returning a specific error results in an appropriate response. """ from webtest import AppError from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.error(status=501) mapping = [(r'/', A)] app = self.make_app(mapping) self.assertRaises(AppError, app.get, '/') try: result = app.get('/') except AppError, err: self.assertTrue('501 Not Implemented' in str(err)) class TestRedirect(unittest.TestCase): """ Sanity check ``self.redirect()``. """ def make_app(self, mapping): from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': '...', 'static_files_path': 'static', 'template_directories': ['templates'] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_redirect(self): """ Returning ``self.redirect(url)`` results in a 302 response. """ from webtest import AppError from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.redirect('/b') mapping = [(r'/', A)] app = self.make_app(mapping) status = app.get('/').status self.assertTrue(status == '302 Found') def test_redirect_location(self): """ Returning ``self.redirect(url)`` uses either the host url or full url if provided. """ from webtest import AppError from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.redirect('/b') class B(RequestHandler): def get(self): return self.redirect('http://foo.com/c') mapping = [(r'/a', A), (r'/b', B)] app = self.make_app(mapping) res = app.get('/a').follow() self.assertTrue(res.headers['location'] == 'http://foo.com/c') def test_permanent_redirect(self): """ Returning ``self.redirect(url, permanent=True)`` returns a 301. """ from webtest import AppError from weblayer import RequestHandler class A(RequestHandler): def get(self): return self.redirect('/b', permanent=True) mapping = [(r'/', A)] app = self.make_app(mapping) status = app.get('/').status self.assertTrue(status == '301 Moved Permanently') class TestRender(unittest.TestCase): """ Sanity check ``self.render()``. 
""" def make_app(self, mapping): from os.path import dirname, join as join_path from webtest import TestApp from weblayer import Bootstrapper, WSGIApplication config = { 'cookie_secret': '...', 'static_files_path': join_path(dirname(__file__), 'static'), 'template_directories': [join_path(dirname(__file__), 'templates')] } bootstrapper = Bootstrapper(settings=config, url_mapping=mapping) application = WSGIApplication(*bootstrapper()) return TestApp(application) def test_builtins(self): """ Returning ``self.render(tmpl_name)`` renders ``tmpl_name``, passing through keyword arguments to the template's global namespace, along with the builtins defined in :py:mod:`~weblayer.template`. """ import datetime from weblayer import RequestHandler class A(RequestHandler): def get(self): kwargs = { 'url': 'http://foo.com/b ar', 'data': {'a': u'b'}, } return self.render('builtins.tmpl', **kwargs) mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/') self.assertTrue('{&quot;a&quot;: &quot;b&quot;}' in res) self.assertTrue('http%3A%2F%2Ffoo.com%2Fb+ar' in res) self.assertTrue(datetime.datetime.min in res) def test_request_variables(self): """ Returning ``self.render(tmpl_name)`` renders ``tmpl_name``, passing through the ``request``, ``current_user``, ``get_static_url`` and ``xsrf_input``. """ from webtest import AppError from weblayer import RequestHandler class A(RequestHandler): __all__ = ('get', 'post') def get(self): return self.render('request.tmpl') def post(self): return u'Hello %s!' % self.request.params.get('name') mapping = [(r'/', A)] app = self.make_app(mapping) res = app.get('/?foo=bar', extra_environ={'REMOTE_USER': 'brian'}) self.assertTrue('foo: bar' in res) self.assertTrue('user: brian' in res) self.assertTrue( 'static url: http://localhost/static/not_present.js' in res ) form = res.form form['name'] = 'Brian' res = form.submit() self.assertTrue(res.body == 'Hello Brian!')
repo_name: thruflo/weblayer
path: src/weblayer/tests/ftests/test_api.py
language: Python
license: unlicense
size: 29769
keyword: ["Brian"]
text_hash: 37423f759fe89a892391678d97d627907df7a938585a664115b041325f6700bd
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Created on Tue Dec 13 16:24:50 2016 @author: david """ """ Combinations to run: - each main arg with - different combinations of: -file arrangements - what order commands are called on a directory structure - which of the allowable ways of specify the directory/file is used (These are what I need to test, not different combinations of with/without -t argument etc) 1. Each of the individual commands """ import os import shutil import datetime import subprocess baseDir = os.path.dirname(os.path.realpath(__file__)) + os.sep qBinary = False orthofinder = baseDir + "../orthofinder/orthofinder.py" my_env = os.environ.copy() my_env["PATH"] = "/home/david/software/ncbi-blast-2.2.28+/bin:" + my_env["PATH"] workspace = baseDir + "ArgumentCombinations/" fileStore = workspace + "FileStore/" fasta = "Mycoplasma/" fastaExtra = "Mycoplasma_extra/" dirs = [fasta, fastaExtra] d_fasta = workspace + fasta d_fastaExtra = workspace + fastaExtra #blastDir = "" #groupsDir = "" #orthologuesDir = "" #def RunOrthoFinder(commands): # capture = subprocess.Popen("python %s %s" % (orthofinder, commands), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # stdout = "".join([x for x in capture.stdout]) # stderr = "".join([x for x in capture.stderr]) # return stdout, stderr def Date(): return datetime.date.today().strftime("%b%d") class Workspace(): def __init__(self): pass def __enter__(self): for d in dirs: shutil.copytree(fileStore + d, workspace + d) def __exit__(self, type, value, traceback): for d in dirs: shutil.rmtree(workspace + d) def CreateCleanDirectories(): for d in dirs: if os.path.exists(workspace + d): shutil.rmtree(workspace + d) for d in dirs: shutil.copytree(fileStore + d, workspace + d) def PrintHeading(message): print("\n\n\n\n\n****** " + message + " ******\n") def run_combinations(): """ Run through a set of OrthoFinder analyses """ CreateCleanDirectories() PrintHeading("Get version") capture = subprocess.Popen(["python", orthofinder, "-h"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout = "".join([x for x in capture.stdout]) iVersion = stdout.find("version") + len("version") + 1 version = stdout[iVersion:].split(" ", 1)[0] # with Workspace(): # Input: 4 fasta files # Output: ortholgoues with 4 possible roots for the species tree PrintHeading("Running from FASTA files") subprocess.call(["python", orthofinder, "-f", d_fasta], env=my_env) d_ogs = d_fasta + "Results_%s/" % Date() d_orths = d_ogs + "Orthologues_%s/" % Date() # Input: From trees from one of the 4 possible roots PrintHeading("Running from Trees (4 possible species tree roots, one specified)") # raise Exception("Running from Trees (4 possible species tree roots, one specified)") subprocess.call(["python", orthofinder, "-ft", d_orths, "-s", d_orths + "Orthologues_using_outgroup_2/SpeciesTree_rooted_at_outgroup_2.txt"], env=my_env) PrintHeading("Extra Species") subprocess.call(["python", orthofinder, "-b", d_ogs + "WorkingDirectory", "-f", d_fastaExtra], env=my_env) PrintHeading("From groups, only write sequences and exit") subprocess.call(["python", orthofinder, "-os", "-fg", d_ogs + "WorkingDirectory/clusters_OrthoFinder_v%s_I1.5.txt_id_pairs.txt" % version], env=my_env) PrintHeading("Rerun from BLAST but stop at groups") subprocess.call(["python", orthofinder, "-b", d_ogs + "WorkingDirectory", "-og"], env=my_env) PrintHeading("From groups (specify file, specify species tree)") subprocess.call(["python", orthofinder, "-fg", d_ogs + 
"WorkingDirectory/clusters_OrthoFinder_v%s_I1.5.txt_id_pairs.txt" % version, "-s", d_ogs + "WorkingDirectory/Orthologues_%s/Orthologues_using_outgroup_1/SpeciesTree_rooted_at_outgroup_1.txt" % Date()], env=my_env) PrintHeading("From groups, use MSAs (specify file, specify species tree)") subprocess.call(["python", orthofinder, "-M", "msa", "-fg", d_ogs + "WorkingDirectory/clusters_OrthoFinder_v%s_I1.5.txt_id_pairs.txt" % version, "-s", d_ogs + "WorkingDirectory/Orthologues_%s/Orthologues_using_outgroup_1/SpeciesTree_rooted_at_outgroup_1.txt" % Date()], env=my_env) # main_args = ["-f " + fastaDir, # "-b " + blastDir, # "-f " + fastaDir + " -b " + blastDir, # "-fg " + groupsDir, # "-ft " + orthologuesDir] # # if __name__ == "__main__": run_combinations()
repo_name: davidemms/OrthoFinder
path: tests/TestArgumentCombinations.py
language: Python
license: gpl-3.0
size: 4712
keyword: ["BLAST"]
text_hash: 9ea4d85b2d31ecf64e5c4a5ffc830914b8c57a4672ca485ec8dd56d45e8830b0
'''
Script will test the RDkit python code for conformance with the agreed format
using yapf.

For each Python file that is found in $RDBASE (excluding the build and External
directories), yapf is used with the style configuration in $RDBASE/setup.cfg.
If a change is required, the difference is printed. At the end of the process,
all non-conformant files are listed and the required yapf command(s) printed.

If changes are found, the script will exit with error code 1, otherwise 0.
'''
from __future__ import print_function

import os
from yapf.yapflib.yapf_api import FormatCode
import sys

rdbase = os.environ.get('RDBASE', '')
styleConfig = os.path.join(rdbase, 'setup.cfg')
excludeDirs = [os.path.join(rdbase, 'build'),
               os.path.join(rdbase, 'External'), ]


def pythonFiles(dirname=rdbase):
    """ Find all python files below directory dirname """
    for root, _, files in os.walk(dirname):
        if any(root.startswith(d) for d in excludeDirs):
            continue
        for file in files:
            if file.endswith(".py"):
                yield os.path.join(root, file)


def yapfChanges(filename):
    """ Use yapf with the default settings to format file filename """
    try:
        with open(filename) as f:
            codeBefore = f.read()
    except UnicodeError:
        with open(filename, encoding='latin-1') as f:
            codeBefore = f.read()
    try:
        changes, changed = FormatCode(codeBefore, style_config=styleConfig,
                                      print_diff=True, filename=filename)
    except Exception:
        print(filename)
        raise
    if changed:
        print(changes)
    return changed


if __name__ == "__main__":
    changedFiles = []
    for s in pythonFiles():
        if yapfChanges(s):
            changedFiles.append(s)
    print()
    if changedFiles:
        print('yapf will make changes to the following files:')
        print('\n'.join(sorted(changedFiles)))
        print('To apply the required changes to your code use the following command(s)')
        for s in sorted(set(s.replace(rdbase, '').split(os.sep)[1] for s in changedFiles)):
            print('yapf --style $RDBASE/setup.cfg --in-place --recursive $RDBASE/{0}'.format(s))
        sys.exit(1)
    print('Code complies with the agreed formatting rules.')
    sys.exit(0)
repo_name: rvianello/rdkit
path: Scripts/PythonFormat.py
language: Python
license: bsd-3-clause
size: 2200
keyword: ["RDKit"]
text_hash: 5caf8ec57197c33f2cb4f90559c37979692d967a65b8ca8a9f83bfe63f14fb2c
from importer import *

import os

import numpy as np
from datetime import datetime as dt
import re


def gen_logfile_name(plateifu):
    plate, ifu = plateifu.split('-')
    status_file_dir = os.path.join(
        os.environ['PCAY_RESULTSDIR'], plate)
    status_file = os.path.join(status_file_dir, '{}.log'.format(plateifu))
    return status_file


def log_file_exists(plateifu):
    status_file = gen_logfile_name(plateifu)
    return os.path.exists(status_file)


def log_indicates_complete(plateifu):
    '''judges galaxy completeness and rerun priority based on log-file
    '''
    status_file = gen_logfile_name(plateifu)

    if not log_file_exists(plateifu):
        # if log-file hasn't been written, a previous parent process probably died
        # before getting to it, so probably good to re-run
        complete, hipri = False, True
    else:
        # if log file exists, check contents of last two lines for graceful exit and analysis success
        with open(status_file, 'r') as logf:
            lines = logf.readlines()

        if not re.search('ENDING GRACEFULLY', lines[-1]):
            # if last line of logfile does not indicate graceful exit from analysis
            # this is probably a segfault case, so do not prioritize for re-run
            complete, hipri = False, False
        elif not re.search('SUCCESS', lines[-2]):
            # if second-last line of logfile does not indicate success
            # this is probably some other error like missing data,
            # and it would be worth trying to re-run
            complete, hipri = False, True
        else:
            # if last line indicates graceful exit, AND second-last line indicates overall success
            # this galaxy is done, and shouldn't be re-run at all
            complete, hipri = True, False

    return complete, hipri


def write_log_file(plateifu, msg, clobber=False):
    '''write a log file
    '''
    status_file = gen_logfile_name(plateifu)
    status_file_dir = os.path.dirname(status_file)

    if not os.path.exists(status_file_dir):
        os.makedirs(status_file_dir)

    msg_withtime = '{} {}'.format(dt.now().strftime('%Y/%m/%d@%H:%M:%S'), msg)

    if clobber:
        mode = 'w'
        msg_logged = msg_withtime
    else:
        mode = 'a'
        msg_logged = '\n{}'.format(msg_withtime)

    with open(status_file, mode) as logf:
        logf.write(msg_logged)


def summary_remaining(drpall, group_col='ifudesignsize'):
    complete, hipri = zip(*list(map(log_indicates_complete, drpall['plateifu'])))
    complete, hipri = np.array(complete), np.array(hipri)

    drpall['complete'] = complete
    drpall['hipri_rerun'] = hipri
    drpall['lopri_rerun'] = (~hipri) & (~complete)

    runtab = drpall[group_col, 'complete', 'hipri_rerun', 'lopri_rerun']
    runtab_group = runtab.group_by(group_col)
    runtab_groupstats = runtab_group.groups.aggregate(np.sum)

    print(runtab_groupstats)


if __name__ == '__main__':
    import manga_tools as m

    drpall = m.load_drpall(mpl_v)
    drpall = drpall[(drpall['ifudesignsize'] > 0) * (drpall['nsa_z'] != -9999.)]
    #print(drpall)

    summary_remaining(drpall)
repo_name: zpace/stellarmass_pca
path: pca_status.py
language: Python
license: mit
size: 3229
keyword: ["Galaxy"]
text_hash: 0a1918c41abc68549d709f244420e2562c9413062d0258ee21f6340cc041cffa
# Unsharp Filter Example
#
# This example shows off using the gaussian filter to unsharp mask filter images.

import sensor, image, time

sensor.reset()                          # Initialize the camera sensor.
sensor.set_pixformat(sensor.GRAYSCALE)  # or sensor.RGB565
sensor.set_framesize(sensor.QQVGA)      # or sensor.QVGA (or others)
sensor.skip_frames(time = 2000)         # Let new settings take effect.
clock = time.clock()                    # Tracks FPS.

while(True):
    clock.tick()             # Track elapsed milliseconds between snapshots().
    img = sensor.snapshot()  # Take a picture and return the image.

    # Run the kernel on every pixel of the image.
    img.gaussian(1, unsharp=True)

    print(clock.fps())  # Note: Your OpenMV Cam runs about half as fast while
                        # connected to your computer. The FPS should increase once disconnected.
repo_name: kwagyeman/openmv
path: scripts/examples/OpenMV/04-Image-Filters/unsharp_filter.py
language: Python
license: mit
size: 794
keyword: ["Gaussian"]
text_hash: 67ce55f86d2cfa1708f0ee329d20f1568f0d4d8b98d7da25173a2dd02a03ffd7
""" Test the FTS3Utilities""" from DIRAC.DataManagementSystem.Client.FTS3File import FTS3File from DIRAC import S_OK, S_ERROR __RCSID__ = "$Id $" import unittest import mock import datetime from DIRAC.DataManagementSystem.private.FTS3Utilities import FTS3JSONDecoder, \ FTS3Serializable, \ groupFilesByTarget, \ generatePossibleTransfersBySources, \ selectUniqueSourceforTransfers, \ selectUniqueRandomSource, \ FTS3ServerPolicy import json class FakeClass(FTS3Serializable): """ Just a fake class""" _attrToSerialize = ['string', 'date', 'dic', 'sub'] def __init__(self): self.string = '' self.date = None self.dic = {} class TestFTS3Serialization(unittest.TestCase): """ Test the FTS3 JSON serialization mechanizme with FTS3JSONEncoder, FTS3JSONDecoder, FTS3Serializable""" def test_01_basic(self): """ Basic json transfer""" obj = FakeClass() obj.string = 'tata' obj.date = datetime.datetime.utcnow().replace(microsecond=0) obj.dic = {'a': 1} obj.notSerialized = 'Do not' obj2 = json.loads(obj.toJSON(), cls=FTS3JSONDecoder) self.assertTrue(obj.string == obj2.string) self.assertTrue(obj.date == obj2.date) self.assertTrue(obj.dic == obj2.dic) self.assertTrue(not hasattr(obj2, 'notSerialized')) def test_02_subobjects(self): """ Try setting as attribute an object """ class NonSerializable(object): """ Fake class not inheriting from FTS3Serializable""" pass obj = FakeClass() obj.sub = NonSerializable() with self.assertRaises(TypeError): obj.toJSON() obj.sub = FakeClass() obj.sub.string = 'pipo' obj2 = json.loads(obj.toJSON(), cls=FTS3JSONDecoder) self.assertTrue(obj.sub.string == obj2.sub.string) def mock__checkSourceReplicas(ftsFiles): succ = {} failed = {} for ftsFile in ftsFiles: if hasattr(ftsFile, 'fakeAttr_possibleSources'): succ[ftsFile.lfn] = dict.fromkeys(getattr(ftsFile, 'fakeAttr_possibleSources')) else: failed[ftsFile.lfn] = 'No such file or directory' return S_OK({'Successful': succ, 'Failed': failed}) class TestFileGrouping(unittest.TestCase): """ Testing all the grouping functions of FTS3Utilities """ def setUp(self): self.f1 = FTS3File() self.f1.fakeAttr_possibleSources = ['Src1', 'Src2'] self.f1.lfn = 'f1' self.f1.targetSE = 'target1' self.f2 = FTS3File() self.f2.fakeAttr_possibleSources = ['Src2', 'Src3'] self.f2.lfn = 'f2' self.f2.targetSE = 'target2' self.f3 = FTS3File() self.f3.fakeAttr_possibleSources = ['Src4'] self.f3.lfn = 'f3' self.f3.targetSE = 'target1' # File does not exist :-) self.f4 = FTS3File() self.f4.lfn = 'f4' self.f4.targetSE = 'target3' self.allFiles = [self.f1, self.f2, self.f3, self.f4] def test_01_groupFilesByTarget(self): # empty input self.assertTrue(groupFilesByTarget([])['Value'] == {}) res = groupFilesByTarget(self.allFiles) self.assertTrue(res['OK']) groups = res['Value'] self.assertTrue(self.f1 in groups['target1']) self.assertTrue(self.f2 in groups['target2']) self.assertTrue(self.f3 in groups['target1']) self.assertTrue(self.f4 in groups['target3']) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities._checkSourceReplicas', side_effect=mock__checkSourceReplicas) def test_02_generatePossibleTransfersBySources(self, _mk_checkSourceReplicas): """ Get all the possible sources""" # We assume here that they all go to the same target res = generatePossibleTransfersBySources(self.allFiles) self.assertTrue(res['OK']) groups = res['Value'] self.assertTrue(self.f1 in groups['Src1']) self.assertTrue(self.f1 in groups['Src2']) self.assertTrue(self.f2 in groups['Src2']) self.assertTrue(self.f2 in groups['Src3']) self.assertTrue(self.f3 in groups['Src4']) 
self.assertTrue(self.f2 in groups['Src3']) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities._checkSourceReplicas', side_effect=mock__checkSourceReplicas) def test_03_selectUniqueSourceforTransfers(self, _mk_checkSourceReplicas): """ Suppose they all go to the same target """ groupBySource = generatePossibleTransfersBySources(self.allFiles)['Value'] res = selectUniqueSourceforTransfers(groupBySource) self.assertTrue(res['OK']) uniqueSources = res['Value'] # Src1 and Src2 should not be here because f1 and f2 should be taken from Src2 self.assertTrue(sorted(uniqueSources.keys()) == sorted(['Src2', 'Src4'])) self.assertTrue(self.f1 in uniqueSources['Src2']) self.assertTrue(self.f2 in uniqueSources['Src2']) self.assertTrue(self.f3 in uniqueSources['Src4']) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities._checkSourceReplicas', side_effect=mock__checkSourceReplicas) def test_04_selectUniqueRandomSource(self, _mk_checkSourceReplicas): """ Suppose they all go to the same target """ res = selectUniqueRandomSource(self.allFiles) self.assertTrue(res['OK']) uniqueSources = res['Value'] # There should be only f1,f2 and f3 allReturnedFiles = [] existingFiles = [self.f1, self.f2, self.f3] for srcSe, ftsFiles in uniqueSources.iteritems(): allReturnedFiles.extend(ftsFiles) # No files should be duplicated and all files should be there, except the non existing one self.assertEqual(len(existingFiles), len(allReturnedFiles)) self.assertEqual(set(existingFiles), set(allReturnedFiles)) filesInSrc1 = uniqueSources.get('Src1', []) filesInSrc2 = uniqueSources.get('Src2', []) filesInSrc3 = uniqueSources.get('Src3', []) filesInSrc4 = uniqueSources.get('Src4', []) # f1 self.assertTrue(self.f1 in filesInSrc1 + filesInSrc2) self.assertTrue(self.f2 in filesInSrc2 + filesInSrc3) self.assertTrue(self.f3 in filesInSrc4) def mock__failoverServerPolicy(_attempt): return "server_0" def mock__randomServerPolicy(_attempt): return "server_0" def mock__sequenceServerPolicy(_attempt): return "server_0" def mock__OKFTSServerStatus(ftsServer): return S_OK(ftsServer) def mock__ErrorFTSServerStatus(ftsServer): return S_ERROR(ftsServer) class TestFTS3ServerPolicy (unittest.TestCase): """ Testing FTS3 ServerPolicy selection """ def setUp(self): self.fakeServerDict = {"server_0": "server0.cern.ch", "server_1": "server1.cern.ch", "server_2": "server2.cern.ch"} @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect=mock__OKFTSServerStatus) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._sequenceServerPolicy', side_effect=mock__sequenceServerPolicy) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._randomServerPolicy', side_effect=mock__randomServerPolicy) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._failoverServerPolicy', side_effect=mock__failoverServerPolicy) def testCorrectServerPolicyIsUsed( self, mockFailoverFunc, mockRandomFunc, mockSequenceFunc, mockFTSServerStatus): " Test correct server policy method is called " obj = FTS3ServerPolicy(self.fakeServerDict, "Sequence") obj.chooseFTS3Server() self.assertTrue(mockSequenceFunc.called) obj = FTS3ServerPolicy(self.fakeServerDict, "Random") obj.chooseFTS3Server() self.assertTrue(mockRandomFunc.called) obj = FTS3ServerPolicy(self.fakeServerDict, "Failover") obj.chooseFTS3Server() self.assertTrue(mockFailoverFunc.called) # random policy should be selected for an invalid policy obj = 
FTS3ServerPolicy(self.fakeServerDict, "InvalidPolicy") obj.chooseFTS3Server() self.assertTrue(mockRandomFunc.called) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect=mock__ErrorFTSServerStatus) def testFailoverServerPolicy(self, mockFTSServerStatus): """ Test if the failover server policy returns server at a given position""" obj = FTS3ServerPolicy(self.fakeServerDict, "Failover") for i in range(len(self.fakeServerDict)): self.assertEquals('server_%d' % i, obj._failoverServerPolicy(i)) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect=mock__ErrorFTSServerStatus) def testSequenceServerPolicy(self, mockFTSServerStatus): """ Test if the sequence server policy selects the servers Sequentially """ obj = FTS3ServerPolicy(self.fakeServerDict, "Sequence") for i in range(len(self.fakeServerDict)): self.assertEquals('server_%d' % i, obj._sequenceServerPolicy(i)) self.assertEquals('server_0', obj._sequenceServerPolicy(i)) @mock.patch( 'DIRAC.DataManagementSystem.private.FTS3Utilities.FTS3ServerPolicy._getFTSServerStatus', side_effect=mock__ErrorFTSServerStatus) def testRandomServerPolicy(self, mockFTSServerStatus): """ Test if the random server policy does not selects the same server multiple times """ obj = FTS3ServerPolicy(self.fakeServerDict, "Random") serverSet = set() for i in range(len(self.fakeServerDict)): serverSet.add(obj._randomServerPolicy(i)) self.assertEquals(len(serverSet), len(self.fakeServerDict)) if __name__ == '__main__': suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestFTS3Serialization) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFileGrouping)) suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestFTS3ServerPolicy)) unittest.TextTestRunner(verbosity=2).run(suite)
repo_name: andresailer/DIRAC
path: DataManagementSystem/private/test/Test_FTS3Utilities.py
language: Python
license: gpl-3.0
size: 9904
keyword: ["DIRAC"]
text_hash: 9673968708339cd327bfd25046cd5ab9c3f74a38c8c06e28f1fab58887819c3d
# coding: utf-8 from __future__ import unicode_literals, division import json from monty.json import MontyEncoder, MontyDecoder """ Created on Dec 6, 2012 """ import os import shutil from unittest import TestCase import unittest from pkg_resources import parse_version import pymatgen import copy from custodian.qchem.handlers import QChemErrorHandler from custodian.qchem.jobs import QchemJob __author__ = "Xiaohui Qu" __version__ = "0.1" __maintainer__ = "Xiaohui Qu" __email__ = "xqu@lbl.gov" __date__ = "Dec 6, 2013" test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_files", "qchem") # noinspection PyUnresolvedReferences scr_dir = os.path.join(test_dir, "scr") class QChemErrorHandlerTest(TestCase): def setUp(self): os.makedirs(scr_dir) os.chdir(scr_dir) @classmethod def _revert_scf_fix_strategy_to_version(cls, old_lines, fix_version="1.0"): old_lines = copy.deepcopy(old_lines) start_index = 0 end_index = 0 for i, v in enumerate(old_lines): if "<SCF Fix Strategy>" in v: start_index = i + 1 break for i, v in enumerate(old_lines): if "</SCF Fix Strategy>" in v: end_index = i break old_strategy_text = old_lines[start_index: end_index] old_strategy = json.loads("\n".join(["{"] + old_strategy_text + ["}"])) target_version_strategy = dict() if fix_version == "1.0": target_version_strategy["current_method_id"] = old_strategy["current_method_id"] if old_strategy["methods"][1] == "rca_diis": methods_list = ["increase_iter", "rca_diis", "gwh", "gdm", "rca", "core+rca"] else: methods_list = ["increase_iter", "diis_gdm", "gwh", "rca", "gdm", "core+gdm"] target_version_strategy["methods"] = methods_list elif fix_version == "2.0": target_version_strategy["current_method_id"] = old_strategy["current_method_id"] if old_strategy["methods"][1] == "rca_diis": methods_list = ["increase_iter", "rca_diis", "gwh", "gdm", "rca", "core+rca", "fon"] else: methods_list = ["increase_iter", "diis_gdm", "gwh", "rca", "gdm", "core+gdm", "fon"] target_version_strategy["methods"] = methods_list target_version_strategy["version"] = old_strategy["version"] else: raise ValueError("Revert to SCF Fix Strategy Version \"{}\" is not " "supported yet".format(fix_version)) target_version_strategy_text = json.dumps(target_version_strategy, indent=4, sort_keys=True) stripped_target_stragy_lines = [line.strip() for line in target_version_strategy_text.split("\n")] target_lines = copy.deepcopy(old_lines) target_lines[start_index: end_index] = stripped_target_stragy_lines[1: -1] return target_lines def test_scf_rca(self): shutil.copyfile(os.path.join(test_dir, "hf_rca.inp"), os.path.join(scr_dir, "hf_rca.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "hf_rca_tried_0.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca.inp")) as f: ans = [line.strip() for line in f.readlines()] ans = self._revert_scf_fix_strategy_to_version(ans, fix_version="1.0") self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_0.inp"), os.path.join(scr_dir, "hf_rca_tried_0.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = 
QChemErrorHandler(input_file="hf_rca_tried_0.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['rca_diis']}) with open(os.path.join(test_dir, "hf_rca_tried_1.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_0.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_1.inp"), os.path.join(scr_dir, "hf_rca_tried_1.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_1.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['gwh']}) with open(os.path.join(test_dir, "hf_rca_tried_2.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_1.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_2.inp"), os.path.join(scr_dir, "hf_rca_tried_2.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_2.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['gdm']}) with open(os.path.join(test_dir, "hf_rca_tried_3.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_2.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_3.inp"), os.path.join(scr_dir, "hf_rca_tried_3.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_3.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['rca']}) with open(os.path.join(test_dir, "hf_rca_tried_4.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_3.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_4.inp"), os.path.join(scr_dir, "hf_rca_tried_4.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_4.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['core+rca']}) with open(os.path.join(test_dir, "hf_rca_tried_5.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_tried_4.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_5.inp"), os.path.join(scr_dir, 
"hf_rca_tried_5.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_tried_5.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': None}) def test_scf_fon(self): shutil.copyfile(os.path.join(test_dir, "hf_rca_hit_5.inp"), os.path.join(scr_dir, "hf_rca_hit_5.inp")) shutil.copyfile(os.path.join(test_dir, "hf_rca.out"), os.path.join(scr_dir, "hf_rca.out")) h = QChemErrorHandler(input_file="hf_rca_hit_5.inp", output_file="hf_rca.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['fon']}) with open(os.path.join(test_dir, "hf_rca_hit_5_fon.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_rca_hit_5.inp")) as f: ans = [line.strip() for line in f.readlines()] ans = self._revert_scf_fix_strategy_to_version(ans, fix_version="2.0") self.assertEqual(ref, ans) def test_negative_eigen(self): shutil.copyfile(os.path.join(test_dir, "negative_eigen.qcinp"), os.path.join(scr_dir, "negative_eigen.qcinp")) shutil.copyfile(os.path.join(test_dir, "negative_eigen.qcout"), os.path.join(scr_dir, "negative_eigen.qcout")) h = QChemErrorHandler(input_file="negative_eigen.qcinp", output_file="negative_eigen.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Molecular charge is not found', 'Negative Eigen'], 'actions': ['use tight integral threshold']}) with open(os.path.join(test_dir, "negative_eigen_tried_1.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "negative_eigen.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "negative_eigen_tried_1.qcinp"), os.path.join(scr_dir, "negative_eigen_tried_1.qcinp")) shutil.copyfile(os.path.join(test_dir, "negative_eigen.qcout"), os.path.join(scr_dir, "negative_eigen.qcout")) h = QChemErrorHandler(input_file="negative_eigen_tried_1.qcinp", output_file="negative_eigen.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Molecular charge is not found', 'Negative Eigen'], 'actions': ['use even tighter integral threshold']}) with open(os.path.join(test_dir, "negative_eigen_tried_2.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "negative_eigen_tried_1.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_no_error(self): shutil.copyfile(os.path.join(test_dir, "hf_no_error.inp"), os.path.join(scr_dir, "hf_no_error.inp")) shutil.copyfile(os.path.join(test_dir, "hf_no_error.out"), os.path.join(scr_dir, "hf_no_error.out")) h = QChemErrorHandler(input_file="hf_no_error.inp", output_file="hf_no_error.out") has_error = h.check() self.assertFalse(has_error) def test_scf_reset(self): shutil.copyfile(os.path.join(test_dir, "hf_rca_tried_1.inp"), os.path.join(scr_dir, "hf_scf_reset.inp")) shutil.copyfile(os.path.join(test_dir, "hf_scf_reset.out"), os.path.join(scr_dir, "hf_scf_reset.out")) h = QChemErrorHandler(input_file="hf_scf_reset.inp", output_file="hf_scf_reset.out") has_error 
= h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed'], 'actions': ['reset']}) with open(os.path.join(test_dir, "hf_scf_reset.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_scf_reset.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_unable_to_determine_lambda(self): shutil.copyfile(os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt.qcinp"), os.path.join(scr_dir, "unable_to_determine_lambda_in_geom_opt.qcinp")) shutil.copyfile(os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt.qcout"), os.path.join(scr_dir, "unable_to_determine_lambda_in_geom_opt.qcout")) h = QChemErrorHandler(input_file="unable_to_determine_lambda_in_geom_opt.qcinp", output_file="unable_to_determine_lambda_in_geom_opt.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed', 'Lamda Determination Failed'], 'actions': ['reset']}) with open(os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt_reset.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "unable_to_determine_lambda_in_geom_opt.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_scf_gdm(self): shutil.copyfile(os.path.join(test_dir, "hf_gdm.inp"), os.path.join(scr_dir, "hf_gdm.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "hf_gdm_tried_0.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm.inp")) as f: ans = [line.strip() for line in f.readlines()] ans = self._revert_scf_fix_strategy_to_version(ans, fix_version="1.0") self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_0.inp"), os.path.join(scr_dir, "hf_gdm_tried_0.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_0.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['diis_gdm']}) with open(os.path.join(test_dir, "hf_gdm_tried_1.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_0.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_1.inp"), os.path.join(scr_dir, "hf_gdm_tried_1.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_1.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['gwh']}) with open(os.path.join(test_dir, "hf_gdm_tried_2.inp")) as f: ref = [line.strip() for line in f.readlines()] with 
open(os.path.join(scr_dir, "hf_gdm_tried_1.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_2.inp"), os.path.join(scr_dir, "hf_gdm_tried_2.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_2.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['rca']}) with open(os.path.join(test_dir, "hf_gdm_tried_3.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_2.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_3.inp"), os.path.join(scr_dir, "hf_gdm_tried_3.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_3.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['gdm']}) with open(os.path.join(test_dir, "hf_gdm_tried_4.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_3.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_4.inp"), os.path.join(scr_dir, "hf_gdm_tried_4.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_4.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['core+gdm']}) with open(os.path.join(test_dir, "hf_gdm_tried_5.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_gdm_tried_4.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_gdm_tried_5.inp"), os.path.join(scr_dir, "hf_gdm_tried_5.inp")) shutil.copyfile(os.path.join(test_dir, "hf_gdm.out"), os.path.join(scr_dir, "hf_gdm.out")) h = QChemErrorHandler(input_file="hf_gdm_tried_5.inp", output_file="hf_gdm.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': None}) def test_opt_failed(self): shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcinp"), os.path.join(scr_dir, "hf_opt_failed.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "hf_opt_failed_tried_0.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_opt_failed.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, 
ans) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed_tried_0.qcinp"), os.path.join(scr_dir, "hf_opt_failed_tried_0.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed_tried_0.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': ['GDIIS']}) with open(os.path.join(test_dir, "hf_opt_failed_tried_1.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_opt_failed_tried_0.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed_tried_1.qcinp"), os.path.join(scr_dir, "hf_opt_failed_tried_1.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed_tried_1.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': ['CartCoords']}) with open(os.path.join(test_dir, "hf_opt_failed_tried_2.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "hf_opt_failed_tried_1.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed_tried_2.qcinp"), os.path.join(scr_dir, "hf_opt_failed_tried_2.qcinp")) shutil.copyfile(os.path.join(test_dir, "hf_opt_failed.qcout"), os.path.join(scr_dir, "hf_opt_failed.qcout")) h = QChemErrorHandler(input_file="hf_opt_failed_tried_2.qcinp", output_file="hf_opt_failed.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Geometry optimization failed'], 'actions': None}) def test_autoz_error(self): shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcinp"), os.path.join(scr_dir, "qunino_vinyl.qcinp")) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcout"), os.path.join(scr_dir, "qunino_vinyl.qcout")) h = QChemErrorHandler(input_file="qunino_vinyl.qcinp", output_file="qunino_vinyl.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found', 'autoz error'], 'actions': ['disable symmetry']}) with open(os.path.join(test_dir, "qunino_vinyl_nosymm.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "qunino_vinyl.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl_nosymm.qcinp"), os.path.join(scr_dir, "qunino_vinyl_nosymm.qcinp")) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcout"), os.path.join(scr_dir, "qunino_vinyl.qcout")) h = QChemErrorHandler(input_file="qunino_vinyl_nosymm.qcinp", output_file="qunino_vinyl.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Geometry optimization failed', 'Molecular charge is not found', 'autoz error'], 'actions': None}) def test_nan_error(self): shutil.copyfile(os.path.join(test_dir, "thiane_nan.inp"), os.path.join(scr_dir, "thiane_nan.inp")) shutil.copyfile(os.path.join(test_dir, "thiane_nan.out"), os.path.join(scr_dir, 
"thiane_nan.out")) h = QChemErrorHandler(input_file="thiane_nan.inp", output_file="thiane_nan.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['NAN values'], 'actions': ['use tighter grid']}) with open(os.path.join(test_dir, "thiane_nan_dense_grid.inp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "thiane_nan.inp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "thiane_nan_dense_grid.inp"), os.path.join(scr_dir, "thiane_nan_dense_grid.inp")) shutil.copyfile(os.path.join(test_dir, "thiane_nan.out"), os.path.join(scr_dir, "thiane_nan.out")) h = QChemErrorHandler(input_file="thiane_nan_dense_grid.inp", output_file="thiane_nan.out") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['NAN values'], 'actions': None}) shutil.copyfile(os.path.join(test_dir, "h2o_nan.qcinp"), os.path.join(scr_dir, "h2o_nan.qcinp")) shutil.copyfile(os.path.join(test_dir, "h2o_nan.qcout"), os.path.join(scr_dir, "h2o_nan.qcout")) h = QChemErrorHandler(input_file="h2o_nan.qcinp", output_file="h2o_nan.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['NAN values'], 'actions': ['use tighter grid']}) with open(os.path.join(test_dir, "h2o_nan_dense_grid.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "h2o_nan.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_no_input_text(self): shutil.copyfile(os.path.join(test_dir, "no_reading.qcinp"), os.path.join(scr_dir, "no_reading.qcinp")) shutil.copyfile(os.path.join(test_dir, "no_reading.qcout"), os.path.join(scr_dir, "no_reading.qcout")) h = QChemErrorHandler(input_file="no_reading.qcinp", output_file="no_reading.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Molecular charge is not found', 'No input text'], 'actions': ['disable symmetry']}) with open(os.path.join(test_dir, "no_reading_nosymm.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "no_reading.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_exit_code_134(self): shutil.copyfile(os.path.join(test_dir, "exit_code_134.qcinp"), os.path.join(scr_dir, "exit_code_134.qcinp")) shutil.copyfile(os.path.join(test_dir, "exit_code_134.qcout"), os.path.join(scr_dir, "exit_code_134.qcout")) h = QChemErrorHandler(input_file="exit_code_134.qcinp", output_file="exit_code_134.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Molecular charge is not found'], 'actions': ['use tight integral threshold']}) with open(os.path.join(test_dir, "exit_code_134_tight_thresh.qcinp"))\ as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "exit_code_134.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_exit_code_134_after_scf_fix(self): shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix.qcinp"), os.path.join(scr_dir, "exit_134_after_scf_fix.qcinp")) shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix.qcout"), os.path.join(scr_dir, "exit_134_after_scf_fix.qcout")) h = 
QChemErrorHandler(input_file="exit_134_after_scf_fix.qcinp", output_file="exit_134_after_scf_fix.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['use tight integral threshold']}) with open(os.path.join(test_dir, "exit_134_after_scf_fix_tight_thresh.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "exit_134_after_scf_fix.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix_tight_thresh.qcinp"), os.path.join(scr_dir, "exit_134_after_scf_fix_tight_thresh.qcinp")) shutil.copyfile(os.path.join(test_dir, "exit_134_after_scf_fix.qcout"), os.path.join(scr_dir, "exit_134_after_scf_fix.qcout")) qchem_job = QchemJob(qchem_cmd="qchem -np 24", input_file="exit_134_after_scf_fix_tight_thresh.qcinp", output_file="exit_134_after_scf_fix.qcout", alt_cmd={"half_cpus": "qchem -np 12", "openmp": "qchem -nt 24"}) h = QChemErrorHandler(input_file="exit_134_after_scf_fix_tight_thresh.qcinp", output_file="exit_134_after_scf_fix.qcout", qchem_job=qchem_job) has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Exit Code 134', 'Geometry optimization failed', 'Molecular charge is not found'], 'actions': ['openmp']}) def test_ts_opt(self): shutil.copyfile(os.path.join(test_dir, "ts_cf3_leave.qcinp"), os.path.join(scr_dir, "ts_cf3_leave.qcinp")) shutil.copyfile(os.path.join(test_dir, "ts_cf3_leave.qcout"), os.path.join(scr_dir, "ts_cf3_leave.qcout")) h = QChemErrorHandler(input_file="ts_cf3_leave.qcinp", output_file="ts_cf3_leave.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Exit Code 134', 'Geometry optimization failed'], 'actions': ['increase_iter']}) with open(os.path.join(test_dir, "ts_cf3_leave_reset_first_step_mol.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "ts_cf3_leave.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_scf_in_aimd_reset(self): shutil.copyfile(os.path.join(test_dir, "h2o_aimd.qcinp"), os.path.join(scr_dir, "h2o_aimd.qcinp")) shutil.copyfile(os.path.join(test_dir, "h2o_aimd.qcout"), os.path.join(scr_dir, "h2o_aimd.qcout")) h = QChemErrorHandler(input_file="h2o_aimd.qcinp", output_file="h2o_aimd.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence'], 'actions': ['reset']}) with open(os.path.join(test_dir, "h2o_aimd_reset.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "h2o_aimd.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_freq_job_too_small(self): shutil.copyfile(os.path.join(test_dir, "freq_seg_too_small.qcinp"), os.path.join(scr_dir, "freq_seg_too_small.qcinp")) shutil.copyfile(os.path.join(test_dir, "freq_seg_too_small.qcout"), os.path.join(scr_dir, "freq_seg_too_small.qcout")) h = QChemErrorHandler(input_file="freq_seg_too_small.qcinp", output_file="freq_seg_too_small.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Exit Code 134', 'Freq Job Too Small'], 'actions': ['use 31 segment in CPSCF']}) with open(os.path.join(test_dir, 
"freq_seg_too_small_31_segments.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "freq_seg_too_small.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "freq_seg_too_small_31_segments.qcinp"), os.path.join(scr_dir, "freq_seg_too_small_31_segments.qcinp")) shutil.copyfile(os.path.join(test_dir, "freq_seg_too_small.qcout"), os.path.join(scr_dir, "freq_seg_too_small.qcout")) h = QChemErrorHandler(input_file="freq_seg_too_small_31_segments.qcinp", output_file="freq_seg_too_small.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Exit Code 134', 'Freq Job Too Small'], 'actions': None}) @unittest.skipIf(parse_version(pymatgen.__version__) <= parse_version('3.2.3'), "New QChem 4.2 PCM format in pymatgen is a feature after " "version 3.2.3") def test_pcm_solvent_deprecated(self): shutil.copyfile(os.path.join(test_dir, "pcm_solvent_deprecated.qcinp"), os.path.join(scr_dir, "pcm_solvent_deprecated.qcinp")) shutil.copyfile(os.path.join(test_dir, "pcm_solvent_deprecated.qcout"), os.path.join(scr_dir, "pcm_solvent_deprecated.qcout")) h = QChemErrorHandler(input_file="pcm_solvent_deprecated.qcinp", output_file="pcm_solvent_deprecated.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Bad SCF convergence', 'Molecular charge is not found', 'No input text', 'pcm_solvent deprecated'], 'actions': ['use keyword solvent instead']}) with open(os.path.join(test_dir, "pcm_solvent_deprecated_use_qc42_format.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "pcm_solvent_deprecated.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) def test_not_enough_total_memory(self): old_jobid = os.environ.get("PBS_JOBID", None) os.environ["PBS_JOBID"] = "hopque473945" shutil.copyfile(os.path.join(test_dir, "not_enough_total_memory.qcinp"), os.path.join(scr_dir, "not_enough_total_memory.qcinp")) shutil.copyfile(os.path.join(test_dir, "not_enough_total_memory.qcout"), os.path.join(scr_dir, "not_enough_total_memory.qcout")) h = QChemErrorHandler(input_file="not_enough_total_memory.qcinp", output_file="not_enough_total_memory.qcout") has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Exit Code 134', 'Not Enough Total Memory'], 'actions': ['Use 48 CPSCF segments']}) with open(os.path.join(test_dir, "not_enough_total_memory_48_segments.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "not_enough_total_memory.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) shutil.copyfile(os.path.join(test_dir, "not_enough_total_memory_48_segments.qcinp"), os.path.join(scr_dir, "not_enough_total_memory_48_segments.qcinp")) shutil.copyfile(os.path.join(test_dir, "not_enough_total_memory.qcout"), os.path.join(scr_dir, "not_enough_total_memory.qcout")) qchem_job = QchemJob(qchem_cmd=["qchem", "-np", "24"], alt_cmd={"openmp": ["qchem", "-seq", "-nt", "24"], "half_cpus": ["qchem", "-np", "12"]}, input_file="not_enough_total_memory_48_segments.qcinp") h = QChemErrorHandler(input_file="not_enough_total_memory_48_segments.qcinp", output_file="not_enough_total_memory.qcout", qchem_job=qchem_job) has_error = h.check() self.assertTrue(has_error) d = h.correct() self.assertEqual(d, {'errors': ['Exit Code 134', 'Not Enough 
Total Memory'], 'actions': ['Use half CPUs and 60 CPSCF segments']}) with open(os.path.join(test_dir, "not_enough_total_memory_60_segments.qcinp")) as f: ref = [line.strip() for line in f.readlines()] with open(os.path.join(scr_dir, "not_enough_total_memory_48_segments.qcinp")) as f: ans = [line.strip() for line in f.readlines()] self.assertEqual(ref, ans) if old_jobid is None: os.environ.pop("PBS_JOBID") else: os.environ["PBS_JOBID"] = old_jobid def test_json_serializable(self): q1 = QChemErrorHandler() str1 = json.dumps(q1, cls=MontyEncoder) q2 = json.loads(str1, cls=MontyDecoder) self.assertEqual(q1.as_dict(), q2.as_dict()) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcinp"), os.path.join(scr_dir, "qunino_vinyl.qcinp")) shutil.copyfile(os.path.join(test_dir, "qunino_vinyl.qcout"), os.path.join(scr_dir, "qunino_vinyl.qcout")) q3 = QChemErrorHandler(input_file="qunino_vinyl.qcinp", output_file="qunino_vinyl.qcout") q3.check() q3.correct() for od in q3.outdata: od.pop("input") str3 = json.dumps(q3, cls=MontyEncoder) q4 = json.loads(str3, cls=MontyDecoder) self.assertEqual(q3.as_dict(), q4.as_dict()) def tearDown(self): shutil.rmtree(scr_dir) pass if __name__ == "__main__": unittest.main()
davidwaroquiers/custodian
custodian/qchem/tests/test_handlers.py
Python
mit
45040
[ "pymatgen" ]
b40a15ad16832f7331b9287e94be11f8f3194dccdcf57a68cb4f22be30cfadd4
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##

import datetime

from stoqlib.domain.person import ClientSalaryHistory, LoginUser
from stoqlib.gui.search.clientsalaryhistorysearch import (
    ClientSalaryHistorySearch)
from stoqlib.gui.test.uitestutils import GUITest


class TestClientSalaryHistorySearch(GUITest):
    def test_search(self):
        client = self.create_client()

        users = self.store.find(LoginUser).order_by(LoginUser.username)
        user_a = users[0]
        user_b = users[1]

        ClientSalaryHistory(date=datetime.datetime(2012, 1, 1),
                            new_salary=1000,
                            old_salary=0,
                            client=client,
                            user=user_a,
                            store=self.store)
        ClientSalaryHistory(date=datetime.datetime(2012, 2, 2),
                            new_salary=2000,
                            old_salary=1000,
                            client=client,
                            user=user_b,
                            store=self.store)
        ClientSalaryHistory(date=datetime.datetime(2012, 3, 3),
                            new_salary=3000,
                            old_salary=2000,
                            client=client,
                            user=user_a,
                            store=self.store)

        search = ClientSalaryHistorySearch(self.store, client)
        search.search.refresh()
        self.check_search(search, 'client-salary-history-no-filter')

        search.set_searchbar_search_string('ad')
        search.search.refresh()
        self.check_search(search, 'client-salary-history-string-filter')
andrebellafronte/stoq
stoqlib/gui/test/test_clientsalaryhistorysearch.py
Python
gpl-2.0
2544
[ "VisIt" ]
2f1879fb9aa0a80fd05827bddbe45cc885c2bac66738e1b1c8294cbe82f9ebaa
#!/usr/bin/env python # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # http://www.gnu.org/licenses/gpl-3.0.html """ Write a GROMACS index file with one group per membrane leaflet. """ from __future__ import print_function, division, with_statement import argparse import itertools import textwrap import sys import os __author__ = "Jonathan Barnoud" # File format description. The key is the name of the field, the value is a # tuple from which the first element is the first (included) and last # (excluded) indices of the field in the line, and the second element the type # of the field content. GRO_FIELDS = { "resid": ((0, 5), int), "resname": ((5, 10), str), "atom_name": ((10, 15), str), "atomid": ((15, 20), int), "x": ((20, 28), float), "y": ((28, 36), float), "z": ((36, 44), float), } PDB_FIELDS = { "resid": ((22, 26), int), "resname": ((17, 20), str), "atom_name": ((12, 16), str), "atomid": ((6, 11), int), "x": ((30, 38), float), "y": ((38, 46), float), "z": ((46, 54), float), } # All the authorized values for the first field of a line in a PDB file PDB_SECTIONS = ( "HEADER", "OBSLTE", "TITLE", "SPLT", "CAVEAT", "COMPND", "SOURCE", "KEYWDS", "EXPDTA", "NUMMDL", "MDLTYP", "AUTHOR", "REVDAT", "SPRSDE", "JRNL", "REMARK", "DBREF", "DBREF1", "DBREF2", "SEQADV", "SEQRES", "MODRES", "HET", "FORMUL", "HETNAM", "HETSYN", "HELIX", "SHEET", "SSBOND", "LINK", "CISPEP", "SITE", "CRYST1", "MTRIX", "ORIGX", "SCALE", "MODEL", "ATOM", "ANISOU", "TER", "HETATM", "ENDMDL", "CONECT", "MASTER", "END", "ENDMOL", ) # MTRIX, ORIGX and SCALE pdb sections can be followed be a random # number so they need to be looked at separately PDB_SHORT_SECTION = ("MTRIX", "ORIGX", "SCALE") class FormatError(Exception): """ Exception raised when the file format is wrong. """ pass def isfile(path): """ Check if path is an existing file. If not, raise an error. Else, return the path. """ if not os.path.isfile(path): if os.path.isdir(path): msg = "{0} is a directory".format(path) else: msg = "{0} does not exist.".format(path) raise argparse.ArgumentTypeError(msg) return path def valid_pdb_line(line): """ Raise a FormatError if the given line does not start like a valid PDB line """ if not (line[0:6].strip() in PDB_SECTIONS or line.strip() == ""): # MTRIX, ORIGX and SCALE pdb sections can be followed be a random # number so they will trigger the previous test if not line[0:4] in PDB_SHORT_SECTION: raise FormatError('PDB line should not start with "{0}"' .format(line[0:6])) def parse_selection(selection): """ Read the atom selection given in argument The atom selection is formatted as follow: :: POPC:PO4 DUPC:PO4 CHOL:ROH Each string separated by a space represents one atom type, before the column is the residue name, after it is the atom name. The residue name can be omitted, then the column is omitted too: :: PO4 CHOL:ROH The final output is a list of tuples. The each tuple represents an atom type, the first element of the tuple is the residue name, the second element is the atom name. 
When the residue name is omitted then the tuple counts only one element: the atom name. Because the function is called from argparse the split on spaces is already done before entering the function, so here we only deal with one single atom type. """ return tuple(selection.split(':')) def stop_at_empty_line(iterator): """ Yield all item of an iterator but stop when the item is an empty line. An empty line is a string which is empty when stripped. """ for line in iterator: if line.strip() == "": return yield line def except_last(iterator): """ Yield all elements of an iterator but the last one. :Parameters: - iterator: the iterator on which to iterate """ previous = next(iterator) for line in iterator: yield previous previous = line def read_gro(lines): """ Read the atoms from a gro file. This function create a generator that yield a dictionary per line of the gro file. :Parameters: - lines: an iterator over atom lines from the gro file. The two header lines and the bottom line describing the box have to be excluded. :Raise: - FormatError: raised if the file format does not fit. """ # "lines" might be a list and not a proper iterator lines = iter(lines) # The two first lines are a header next(lines) next(lines) # Loop over the lines, stop before an empty line and ignore the last # non empty line since it describes the box for line in except_last(stop_at_empty_line(lines)): try: atom = dict(((key, convert(line[begin:end].strip())) for key, ((begin, end), convert) in GRO_FIELDS.items())) except ValueError: raise FormatError yield atom def read_pdb(lines): """ Read the atoms from a pdb file. This function create a generator that yield a dictionary per line of the pdb file. :Parameters: - lines: an iterator over the lines from the PDB file :Raise: - FormatError: raised if the file format does not fit. """ for line in lines: # Test if the line starts as it should in a PDB file valid_pdb_line(line) if line[0:6] == "ATOM ": try: atom = dict(((key, convert(line[begin:end].strip())) for key, ((begin, end), convert) in PDB_FIELDS.items())) except ValueError: raise FormatError yield atom def is_selected(atom, selection): """ Return True is the atom fit the selection criteria. """ for atom_type in selection: if len(atom_type) == 1 and atom["atom_name"] == atom_type[0]: return True if (atom["resname"] == atom_type[0] and atom["atom_name"] == atom_type[1]): return True return False def select_atoms(atoms, selection): """ Select only the atoms with the given atom name and residue name. This function create a generator that yield the dictionary for the atoms with the given atom name and residue name. :Parameters: - atoms: an iterator over the atom dictionaries - selection: an atom selection as described in ``parse_selection`` """ for atom in atoms: if is_selected(atom, selection): yield atom def axis_coordinates(atoms, axis): """ Get the coordinate of the atom along the given axis. Create a generator on the atom coordinate along the axis of interest. :Parameters: - atoms: an iterator over the atom dictionaries - axis: the name of the dimension normal to the membrane (x, y or z) """ for atom in atoms: yield atom[axis] def mean(values): """ Calculate the mean of an iterator. """ summation = 0 nelements = 0 for value in values: summation += value nelements += 1 return summation / nelements def split(atoms, average, axis): """ Split the leaflets along the given axis. 
""" groups = {"upper_leaflet": [], "lower_leaflet": []} for atom in atoms: if atom[axis] >= average: groups["upper_leaflet"].append(atom["atomid"]) else: groups["lower_leaflet"].append(atom["atomid"]) return groups def split_get_res(atoms, average, axis, selection): """ Split the leaflets along the given axis and keep the whole residue. """ groups = {"upper_leaflet": [], "lower_leaflet": []} keep_res = None current_resid = None current_res_atoms = [] current_group = None for atom in atoms: # Keep track of the atoms of the residue, this is needed to have # have in the groups the atoms of a residue of interest that have been # read before the reference atom if atom["resid"] != current_resid: # We start a new residue current_resid = atom["resid"] current_res_atoms = [] # Always reset the keep_res variable when we change the residue. # If we miss that we can catch extra residues because of the # reseting of the residue number that happend when the resid become # too big. keep_res = None current_res_atoms.append(atom["atomid"]) # Split the residues of interest if is_selected(atom, selection) and not keep_res: keep_res = atom["resid"] # Choose the group if atom[axis] >= average: current_group = "upper_leaflet" else: current_group = "lower_leaflet" # Add the atom of the residue that were read before the reference # atom. Do not include the last atom of the list since it is the # current atom and that he will be added to the group later. groups[current_group] += current_res_atoms[:-1] # Store the atom in the right group if not keep_res is None and atom["resid"] == keep_res: groups[current_group].append(atom["atomid"]) return groups def write_ndx(groups): """ Write a gromacs index file with the given groups. """ for group_name, atomids in groups.items(): print("[ {0} ]".format(group_name)) group_str = " ".join([str(i) for i in atomids]) print("\n".join(textwrap.wrap(group_str, 80))) def split_leaflets(infile, axis, selection, file_reader, res=False): """ Split bilayer leaflets from a gromacs gro file along the given axis. :Parameters: - infile: the input file describing the structure as a iterator over the structure file (gro or pdb format) - axis: the dimension normal to the membrane plane (x, y, or z) - selection: an atom selection list as outputed by ``parse_selection'', the selection is used to get the reference atoms - file_reader: a callback to the function that will read the input (typically read_gro or read_pdb) - res: a boolean, True if you want to keep whole residues in the output, False by default :Return: - a dictionary, they keys are "upper_leaflet" and "lower_leaflet", the values are lists of atom indices in each leaflet. The indices start at 1! """ axis = axis.lower() atoms = list(file_reader(infile)) selected = list(select_atoms(atoms, selection)) coordinates = axis_coordinates(selected, axis) average = mean(coordinates) if res: groups = split_get_res(atoms, average, axis, selection) else: groups = split(selected, average, axis) return groups def get_options(argv): """ Read the command line arguments. 
""" usage = ("%(prog)s [options] < input > output.ndx\n" " %(prog)s [options] -- input > output.ndx") parser = argparse.ArgumentParser(description=__doc__, usage=usage) parser.add_argument("input", default=None, nargs='?', type=isfile, help="The input structure.") parser.add_argument("--axis", "-d", choices="xyz", default="z", help="Axis normal to the bilayer.") parser.add_argument("--atom", "-a", type=parse_selection, default=[("P1",)], nargs='+', help="Reference atom name.") parser.add_argument("--format", "-f", type=str, default="auto", choices=["gro", "pdb", "auto"], help="Input file format.") keep_options = parser.add_mutually_exclusive_group() keep_options.add_argument("--keep-residue", "-r", action="store_true", dest="keep_residue", default=False, help="Keep the whole residues.") keep_options.add_argument("--keep-atom", "-k", action="store_false", dest="keep_residue", default=False, help="Keep only the atom of reference.") args = parser.parse_args(argv) return args def guess_format(infile): """ Guess the format of the input file among gro and pdb. Look if the file is a PDB one or assume it is a gro file. Return the format and an iterator that mimic the input file. """ read_lines = [] # Empty lines are not informative, let's go to the first not empty line line = infile.readline() while len(line) >= 1 and line.strip() == "": read_lines.append(line) line = infile.readline() read_lines.append(line) # If the file is empty it is not worth continuing if not line: return 'empty', [] # Check if the line could be from a PDB file, if not we probably are # reading a gro file try: valid_pdb_line(line) except FormatError: input_format = "gro" else: input_format = "pdb" mod_infile = itertools.chain(read_lines, infile) return input_format, mod_infile def reformat_selection(selection): """ Generate a human readable string from an atom selection critera list. """ return " ".join([":".join(criterion) for criterion in selection]) def main(): """ Run everything from the command line. """ args = get_options(sys.argv[1:]) if args.input is None: print("Read input from the standard input.", file=sys.stderr) infile = sys.stdin else: try: infile = open(args.input) except IOError as error: print("Error while oppening file {0}".format(error.filename), file=sys.stderr) return 1 with infile: # Guess the format if args.format == "auto": input_format, mod_infile = guess_format(infile) else: input_format = args.format mod_infile = infile # Complain if the file is known to be empty if input_format == 'empty': print("The file is empty!", file=sys.stderr) return 1 readers = {"gro": read_gro, "pdb": read_pdb} file_reader = readers[input_format] # Do the work try: groups = split_leaflets(mod_infile, args.axis, args.atom, file_reader, args.keep_residue) # Complain if the format is wrong except FormatError: if (args.format == "auto"): print("Error while reading the input. Are you sure your file " "is in the pdb or gro format?", file=sys.stderr) else: print(("Error while reading the input. Are you sure your file " "is in the {0} format?").format(args.format), file=sys.stderr) return 1 # Complain if the reference atom is absent except ZeroDivisionError: print(("There seems to be no atom corresponding to your reference " "selection. 
Are you sure this selection is present in your " "structure: '{0}'?") .format(reformat_selection(args.atom)), file=sys.stderr) return 1 else: write_ndx(groups) # Display the number of atoms per group for group_name, atomids in groups.items(): print("{0}: {1} atoms".format(group_name, len(atomids)), file=sys.stderr) if len(groups["upper_leaflet"]) == len(groups["lower_leaflet"]): print("The membrane is symmetric.", file=sys.stderr) return 0 if __name__ == "__main__": sys.exit(main())
jbarnoud/splitleafs
splitleafs.py
Python
gpl-3.0
16657
[ "Gromacs" ]
eb88b98b5e6dc2cd5d46fbf71ee0eb2f609738e48e631b80c5eb83f87d876f84
#!/bin/env python """ Module openmm.unit.baseunit Contains BaseUnit class, which is a component of the Unit class. This is part of the OpenMM molecular simulation toolkit originating from Simbios, the NIH National Center for Physics-Based Simulation of Biological Structures at Stanford, funded under the NIH Roadmap for Medical Research, grant U54 GM072970. See https://simtk.org. Portions copyright (c) 2012 Stanford University and the Authors. Authors: Christopher M. Bruns Contributors: Peter Eastman Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import print_function, division, absolute_import __author__ = "Christopher M. Bruns" __version__ = "0.6" class BaseUnit(object): ''' Physical unit expressed in exactly one BaseDimension. For example, meter_base_unit could be a BaseUnit for the length dimension. The BaseUnit class is used internally in the more general Unit class. ''' __array_priority__ = 100 def __init__(self, base_dim, name, symbol): """Creates a new BaseUnit. Parameters - self: The newly created BaseUnit. - base_dim: (BaseDimension) The dimension of the new unit, e.g. 'mass' - name: (string) Name of the unit, e.g. "kilogram" - symbol: (string) Symbol for the unit, e.g. 'kg'. This symbol will appear in Quantity string descriptions. """ self.dimension = base_dim self.name = name self.symbol = symbol self._conversion_factor_to = {} self._conversion_factor_to[self] = 1.0 self._conversion_factor_to_by_name = {} self._conversion_factor_to_by_name[self.name] = 1.0 def __lt__(self, other): """ Comparison function that sorts BaseUnits by BaseDimension """ # First sort on dimension if self.dimension != other.dimension: return self.dimension < other.dimension # Second on conversion factor return self.conversion_factor_to(other) < 1.0 def iter_base_dimensions(self): """ Returns a dictionary of BaseDimension:exponent pairs, describing the dimension of this unit. """ yield (self.dimension, 1) def iter_base_units(self): yield (self, 1) def get_dimension_tuple(self): """ Returns a sorted tuple of (BaseDimension, exponent) pairs, that can be used as a dictionary key. """ l = list(self.iter_base_dimensions()) l.sort() return tuple(l) def __str__(self): """Returns a string with the name of this BaseUnit """ return self.name def __repr__(self): return 'BaseUnit(base_dim=%s, name="%s", symbol="%s")' % (self.dimension, self.name, self.symbol) def define_conversion_factor_to(self, other, factor): """ Defines a conversion factor between two BaseUnits. 
self * factor = other Parameters: - self: (BaseUnit) 'From' unit in conversion. - other: (BaseUnit) 'To' unit in conversion. - factor: (float) Conversion factor. After calling this method, both self and other will have stored conversion factors for one another, plus all other BaseUnits which self and other have previously defined. Both self and other must have the same dimension, otherwise a TypeError will be raised. Returns None. """ if self.dimension != other.dimension: raise TypeError('Cannot define conversion for BaseUnits with different dimensions.') assert(factor != 0) assert(not self is other) # import all transitive conversions self._conversion_factor_to[other] = factor self._conversion_factor_to_by_name[other.name] = factor for (unit, cfac) in other._conversion_factor_to.items(): if unit is self: continue if unit in self._conversion_factor_to: continue self._conversion_factor_to[unit] = factor * cfac unit._conversion_factor_to[self] = pow(factor * cfac, -1) self._conversion_factor_to_by_name[unit.name] = factor * cfac unit._conversion_factor_to_by_name[self.name] = pow(factor * cfac, -1) # and for the other guy invFac = pow(factor, -1.0) other._conversion_factor_to[self] = invFac other._conversion_factor_to_by_name[self.name] = invFac for (unit, cfac) in self._conversion_factor_to.items(): if unit is other: continue if unit in other._conversion_factor_to: continue other._conversion_factor_to[unit] = invFac * cfac unit._conversion_factor_to[other] = pow(invFac * cfac, -1) other._conversion_factor_to_by_name[unit.name] = invFac * cfac unit._conversion_factor_to_by_name[other.name] = pow(invFac * cfac, -1) def conversion_factor_to(self, other): """Returns a conversion factor from this BaseUnit to another BaseUnit. It does not matter which existing BaseUnit you define the conversion factor to. Conversions for all other known BaseUnits will be computed at the same time. Raises TypeError if dimension does not match. Raises LookupError if no conversion has been defined. (see define_conversion_factor_to). """ if self is other: return 1.0 if self.dimension != other.dimension: raise TypeError('Cannot get conversion for BaseUnits with different dimensions.') if not other.name in self._conversion_factor_to_by_name: raise LookupError('No conversion defined from BaseUnit "%s" to "%s".' % (self, other)) return self._conversion_factor_to_by_name[other.name] # run module directly for testing if __name__=='__main__': # Test the examples in the docstrings import doctest, sys doctest.testmod(sys.modules[__name__])
mdtraj/mdtraj
mdtraj/utils/unit/baseunit.py
Python
lgpl-2.1
6929
[ "OpenMM" ]
ad8c5c43c25f2578651647c2264a886e56de6d5f49cc4fe7a4ba8a6718fdce66
"""A simple filter that thresholds CitcomS Caps from input data.""" # Author: Martin Weier # #Copyright (C) 2006 California Institute of Technology #This program is free software; you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation; either version 2 of the License, or #any later version. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. #You should have received a copy of the GNU General Public License #along with this program; if not, write to the Free Software #Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # Enthought library imports. from enthought.traits import Instance, Range, Int from enthought.traits.ui import View, Group, Item from enthought.tvtk.api import tvtk # Local imports from enthought.mayavi.core.filter import Filter ###################################################################### # `Threshold` class. ###################################################################### class CitcomSshowCaps(Filter): # The version of this class. Used for persistence. __version__ = 0 # The threshold filter. ugrid_filter = Instance(tvtk.ExtractUnstructuredGrid, ()) # Lower threshold (this is a dynamic trait that is changed when # input data changes). lower_threshold = Range(0, 12, 0, desc='the lower threshold of the filter') # Upper threshold (this is a dynamic trait that is changed when # input data changes). upper_threshold = Range(0, 12, 12, desc='the upper threshold of the filter') # Our view. view = View(Group(Item(name='lower_threshold'), Item(name='upper_threshold')) ) n = Int() caps = Int() ###################################################################### # `Filter` interface. ###################################################################### def setup_pipeline(self): """Override this method so that it *creates* its tvtk pipeline. This method is invoked when the object is initialized via `__init__`. Note that at the time this method is called, the tvtk data pipeline will *not* yet be setup. So upstream data will not be available. The idea is that you simply create the basic objects and setup those parts of the pipeline not dependent on upstream sources and filters. """ # Just setup the default output of this filter. self.ugrid_filter.point_clipping = 1 self.ugrid_filter.merging = 0 self.outputs = [self.ugrid_filter.output] def update_pipeline(self): """Override this method so that it *updates* the tvtk pipeline when data upstream is known to have changed. This method is invoked (automatically) when the input fires a `pipeline_changed` event. """ # By default we set the input to the first output of the first # input. fil = self.ugrid_filter fil.input = self.inputs[0].outputs[0] #Than we calculate how many points belong to one cap self.caps = 12 self.n = self.inputs[0].outputs[0].number_of_points/12 #Than we set the output of the filter self.outputs[0] = fil.output self.outputs.append(self.inputs[0].outputs[0]) self.pipeline_changed = True def update_data(self): """Override this method to do what is necessary when upstream data changes. This method is invoked (automatically) when any of the inputs sends a `data_changed` event. """ fil = self.ugrid_filter fil.update() # Propagate the data_changed event. 
self.data_changed = True ###################################################################### # Non-public interface ###################################################################### def _lower_threshold_changed(self,old_value, new_value): """Callback interface for the lower threshold slider""" fil = self.ugrid_filter fil.point_minimum = (self.lower_threshold)*(self.n) fil.update() self.data_changed = True def _upper_threshold_changed(self, old_value, new_value): """Callback interface for the upper threshold slider""" fil = self.ugrid_filter fil.point_maximum = self.upper_threshold*(self.n) fil.update() self.data_changed = True
geodynamics/citcoms
visual/Mayavi2/original_plugins/plugins/filter/CitcomSshowCaps.py
Python
gpl-2.0
4788
[ "Mayavi" ]
a2674e3a2d86e9acfc630ccf2c8f2c4b1f259f4cc9dfb89e0d8d098a248e2abe
""" Copyright [2009-2017] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ """ Expert databases. """ # new database template # { # 'name': '', # 'label': '', # 'url': '', # 'description': '', # 'hint': '', # 'tags': ['', '', ''], # 'abbreviation': '', # 'examples': [ # {'upi': '', 'taxid': 0}, # {'upi': '', 'taxid': 0}, # {'upi': '', 'taxid': 0}, # ], # 'references': [ # { # 'title': '', # 'authors': '', # 'journal': '', # 'pubmed_id': '', # }, # ], # 'imported': True, # 'status': 'new', # 'version': '', # }, expert_dbs = [ { 'name': 'ENA', 'label': 'ena', 'url': 'https://www.ebi.ac.uk/ena/browser/', 'description': "provides a comprehensive record of the world's nucleotide sequencing information", 'hint': "ENA is a comprehensive record of the world's nucleotide sequencing information", 'tags': ['all ncRNA types', 'sequence archive'], 'abbreviation': 'European Nucleotide Archive', 'examples': [ {'upi': 'URS00002D0E0C', 'taxid': 10090}, {'upi': 'URS000035EE7E', 'taxid': 9606}, {'upi': 'URS0000000001', 'taxid': 77133}, ], 'references': [ { 'title': 'The European Nucleotide Archive in 2017', 'authors': 'Silvester et al.', 'journal': 'Nucleic Acids Res. 2017', 'pubmed_id': '29140475', }, ], 'imported': True, 'status': 'updated', 'version': 'as of 3 Sept 2021', }, { 'name': 'PDBe', 'label': 'pdbe', 'url': 'http://www.ebi.ac.uk/pdbe/', 'description': 'is the European repository of information about the 3D structures of large biological molecules. PDBe is a member of the Worldwide Protein Data Bank', 'hint': 'PDBe is the European repository of information about the 3D structures of large biological molecules. PDBe is a member of the Worldwide Protein Data Bank', 'tags': ['curated', '3D structure'], 'abbreviation': 'Protein Data Bank in Europe', 'examples': [ {'upi': 'URS00000ABFE9', 'taxid': 562}, # E.coli SSU, 4V4Q chain AA {'upi': 'URS00005A14E2', 'taxid': 9606}, # Human SSU, 4V6X chain B2 {'upi': 'URS000032B6B6', 'taxid': 9606}, # Human U1 snRNA, PDB 3PGW chain N ], 'references': [ { 'title': 'PDBe: Protein Data Bank in Europe', 'authors': 'Gutmanas A, Alhroub Y, Battle GM, Berrisford JM, Bochet E, Conroy MJ, Dana JM, Fernandez Montecelo MA, van Ginkel G, Gore SP et al.', 'journal': 'Nucleic Acids Res. 2014 Jan;42(Database issue):D285-91', 'pubmed_id': '24288376', }, ], 'imported': True, 'status': 'updated', 'version': 'as of 3 Sept 2021', }, { 'name': 'FlyBase', 'label': 'flybase', 'url': 'http://flybase.org/', 'description': 'is a database of Drosophila genes and genomes', 'hint': 'FlyBase is a database of Drosophila genes and genomes', 'tags': ['curated', 'model organism', 'Drosophila'], 'abbreviation': '', 'examples': [ {'upi': 'URS00007F7879', 'taxid': 7227}, {'upi': 'URS00007EBD0C', 'taxid': 7227}, {'upi': 'URS00002B64E6', 'taxid': 7227}, ], 'references': [ { 'title': 'FlyBase 2.0: the next generation', 'authors': 'The FlyBase Consortium', 'journal': 'Nucleic Acids Res. 
2019;47(D1):D759-D765', 'pubmed_id': '30364959', }, ], 'imported': True, 'status': 'updated', 'version': 'FB2021_04', }, { 'name': 'Ensembl', 'label': 'ensembl', 'url': 'http://ensembl.org/', 'description': 'is a genome browser for vertebrate genomes and model organisms that supports research in comparative genomics, evolution, sequence variation and transcriptional regulation', 'hint': 'Ensembl is a genome browser for vertebrate genomes and model organisms that supports research in comparative genomics, evolution, sequence variation and transcriptional regulation', 'tags': ['reference genomes'], 'abbreviation': '', 'examples': [ {'upi': 'URS000025784F', 'taxid': 9606}, {'upi': 'URS000075A546', 'taxid': 9606}, {'upi': 'URS00005CF03F', 'taxid': 9606}, ], 'references': [ { 'title': 'Ensembl 2017', 'authors': 'Aken BL, Achuthan P, Akanni W, Amode MR, Bernsdorff F, Bhai J, Billis K, Carvalho-Silva D, Cummins C, Clapham P et al.', 'journal': 'Nucleic Acids Res. 2017 Jan 4;45(D1):D635-D642', 'pubmed_id': '27899575', }, { 'title': 'The Ensembl gene annotation system.', 'authors': 'Aken BL, Ayling S, Barrell D, Clarke L, Curwen V, Fairley S, Fernandez Banet J, Billis K, Garcia Giron C, Hourlier T, Howe K, Kahari A, Kokocinski F, Martin FJ, Murphy DN, Nag R, Ruffier M, Schuster M, Tang YA, Vogel JH, White S, Zadissa A, Flicek P, Searle SM.', 'journal': 'Database (Oxford). 2016 Jun 23;2016.', 'pubmed_id': '27337980', } ], 'imported': True, 'status': '', 'version': '104', }, { 'name': 'Ensembl Plants', 'label': 'ensembl_plants', 'url': 'https://plants.ensembl.org/', 'description': 'is a genome browser for plant genomes that complements the Ensembl database', 'hint': 'Ensembl Plants is a genome browser for plant genomes that complements the Ensembl database', 'tags': ['reference genomes'], 'abbreviation': '', 'examples': [ {'upi': 'URS0000A77357', 'taxid': 3702}, {'upi': 'URS0000A7685E', 'taxid': 3702}, {'upi': 'URS00005391BB', 'taxid': 3702}, ], 'references': [ { 'title': 'Ensembl Genomes 2018: an integrated omics infrastructure for non-vertebrate species', 'authors': 'Kersey PJ, Allen JE, Allot A, Barba M, Boddu S, Bolt BJ, Carvalho-Silva D, et al.', 'journal': 'Nucleic Acids Res. 2018 Jan 4;46(D1):D802-D808', 'pubmed_id': '29092050', }, ], 'imported': True, 'status': '', 'version': '51', }, { 'name': 'Ensembl Fungi', 'label': 'ensembl_fungi', 'url': 'https://fungi.ensembl.org/', 'description': 'is a genome browser for fungi genomes that complements the Ensembl database', 'hint': 'Ensembl Fungi is a genome browser for fungi genomes that complements the Ensembl database', 'tags': ['reference genomes'], 'abbreviation': '', 'examples': [ {'upi': 'URS00006E4BC8', 'taxid': 644358}, {'upi': 'URS00006DF6F8', 'taxid': 334819}, {'upi': 'URS00004CEEE1', 'taxid': 402676}, ], 'references': [ { 'title': 'Ensembl Genomes 2018: an integrated omics infrastructure for non-vertebrate species', 'authors': 'Kersey PJ, Allen JE, Allot A, Barba M, Boddu S, Bolt BJ, Carvalho-Silva D, et al.', 'journal': 'Nucleic Acids Res. 
2018 Jan 4;46(D1):D802-D808', 'pubmed_id': '29092050', }, ], 'imported': True, 'status': '', 'version': '51', }, { 'name': 'Ensembl Metazoa', 'label': 'ensembl_metazoa', 'url': 'https://metazoa.ensembl.org/', 'description': 'is a genome browser for metazoan genomes that complements the Ensembl database', 'hint': 'Ensembl Metazoa is a genome browser for metazoan genomes that complements the Ensembl database', 'tags': ['reference genomes'], 'abbreviation': '', 'examples': [ {'upi': 'URS00006F2B82', 'taxid': 121224}, {'upi': 'URS0000C28C34', 'taxid': 136037}, {'upi': 'URS00006AD331', 'taxid': 7668}, ], 'references': [ { 'title': 'Ensembl Genomes 2018: an integrated omics infrastructure for non-vertebrate species', 'authors': 'Kersey PJ, Allen JE, Allot A, Barba M, Boddu S, Bolt BJ, Carvalho-Silva D, et al.', 'journal': 'Nucleic Acids Res. 2018 Jan 4;46(D1):D802-D808', 'pubmed_id': '29092050', }, ], 'imported': True, 'status': '', 'version': '51', }, { 'name': 'Ensembl Protists', 'label': 'ensembl_protists', 'url': 'https://protists.ensembl.org/', 'description': 'is a genome browser for protist genomes that complements the Ensembl database', 'hint': 'Ensembl Protists is a genome browser for protist genomes that complements the Ensembl database', 'tags': ['reference genomes'], 'abbreviation': '', 'examples': [ {'upi': 'URS00000900A9', 'taxid': 347515}, {'upi': 'URS0000716773', 'taxid': 312017}, {'upi': 'URS0000C74655', 'taxid': 559515}, ], 'references': [ { 'title': 'Ensembl Genomes 2018: an integrated omics infrastructure for non-vertebrate species', 'authors': 'Kersey PJ, Allen JE, Allot A, Barba M, Boddu S, Bolt BJ, Carvalho-Silva D, et al.', 'journal': 'Nucleic Acids Res. 2018 Jan 4;46(D1):D802-D808', 'pubmed_id': '29092050', }, ], 'imported': True, 'status': '', 'version': '51', }, { 'name': 'Ensembl/GENCODE', 'label': 'ensembl_gencode', 'url': 'http://gencodegenes.org/', 'description': 'produces high quality reference gene annotation and experimental validation for human and mouse genomes', 'hint': 'GENCODE produces high quality reference gene annotation and experimental validation for human and mouse genomes', 'tags': ['curated', 'human', 'mouse', 'gene annotation'], 'abbreviation': '', 'examples': [ {'upi': 'URS00000B15DA', 'taxid': 9606}, {'upi': 'URS00000A54A6', 'taxid': 9606}, {'upi': 'URS000078452D', 'taxid': 10090}, ], 'references': [ { 'title': 'GENCODE: the reference human genome annotation for The ENCODE Project', 'authors': 'Harrow J, Frankish A, Gonzalez JM, Tapanari E, Diekhans M, Kokocinski F, Aken BL, Barrell D, Zadissa A et al.', 'journal': 'Genome research 2012;22;9;1760-74', 'pubmed_id': '22955987', }, ], 'imported': True, 'status': '', 'version': 'human 38/mouse M27', }, { 'name': 'Rfam', 'label': 'rfam', 'url': 'http://rfam.org', 'description': 'is a collection of non-coding RNA families represented by manually curated sequence alignments, consensus secondary structures and predicted homologues', 'hint': 'Rfam is a collection of non-coding RNA families, represented by manually curated sequence alignments, consensus secondary structures and predicted homologues', 'tags': ['curated', 'automatic', 'alignments'], 'abbreviation': '', 'examples': [ {'upi': 'URS00000478B7', 'taxid': 9606}, {'upi': 'URS000023DE4C', 'taxid': 9606}, {'upi': 'URS000068EEC5', 'taxid': 8752}, ], 'references': [ { 'title': 'Rfam 13.0: Shifting to a genome-centric resource for non- coding RNA families', 'authors': 'Kalvari I, Argasinska J, Quinones-Olvera N, Nawrocki EP, Rivas E, Eddy SR, Bateman A, 
Finn RD, Petrov AI', 'journal': 'Nucleic Acids Res. 2017 (Accepted)', 'pubmed_id': '29112718', }, ], 'imported': True, 'status': '', 'version': '14.2', }, { 'name': 'miRBase', 'label': 'mirbase', 'url': 'http://www.mirbase.org/', 'description': 'is a database of published miRNA sequences and annotations that provides a centralised system for assigning names to miRNA genes', 'hint': 'miRBase contains high-quality miRNA annotations; miRBase is responsible for assigning official miRNA gene names', 'tags': ['curated', 'miRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS000075A685', 'taxid': 9606}, {'upi': 'URS00003B7674', 'taxid': 10090}, {'upi': 'URS000016FD1A', 'taxid': 9598}, ], 'references': [ { 'title': 'miRBase: integrating microRNA annotation and deep-sequencing data', 'authors': 'Kozomara A., Griffiths-Jones S.', 'journal': 'Nucleic Acids Res. 39(Database issue): D152-7 (2011 Jan)', 'pubmed_id': '21037258', }, { 'title': 'miRBase: microRNA sequences, targets and gene nomenclature', 'authors': 'Griffiths-Jones S, Grocock RJ, van Dongen S, Bateman A, Enright AJ', 'journal': 'Nucleic Acids Res. 34(Database issue):D140-4 (2006 Jan1)', 'pubmed_id': '16381832' } ], 'imported': True, 'status': '', 'version': '22.1', }, { 'name': 'Vega', 'label': 'vega', 'url': 'http://vega.sanger.ac.uk/', 'description': 'is a repository for high-quality gene models produced by the manual annotation of vertebrate genomes. Human and mouse data from Vega are merged into <a href="http://www.gencodegenes.org/" target="_blank">GENCODE</a>', 'hint': 'Vega will be replaced by GENCODE in the next release of RNAcentral', 'tags': ['curated', 'lncRNA'], 'abbreviation': 'Vertebrate Genome Annotation', 'examples': [ {'upi': 'URS00000B15DA', 'taxid': 9606}, {'upi': 'URS00000A54A6', 'taxid': 9606}, {'upi': 'URS0000301B08', 'taxid': 9606}, ], 'references': [ { 'title': 'The GENCODE v7 catalog of human long noncoding RNAs: analysis of their gene structure, evolution, and expression.', 'authors': 'Derrien T., Johnson R., Bussotti G., Tanzer A., Djebali S., Tilgner H., Guernec G., Martin D., Merkel A., Knowles DG. et al.', 'journal': 'Genome Res. 22(9): 1775-1789 (2012 Sep)', 'pubmed_id': '22955988', }, { 'title': 'GENCODE: the reference human genome annotation for The ENCODE Project', 'authors': 'Harrow J., Frankish A., Gonzalez JM., Tapanari E., Diekhans M., Kokocinski F., Aken BL., Barrell D., Zadissa A., Searle S. et al.', 'journal': 'Genome Res. 22(9): 1760-1774 (2012 Sep)', 'pubmed_id': '22955987', }, ], 'imported': False, 'status': 'archived', 'version': 'release 65', }, { 'name': 'tmRNA Website', 'label': 'tmrna-website', 'url': 'http://bioinformatics.sandia.gov/tmrna/', 'description': 'contains predicted tmRNA sequences from RefSeq bacterial genomes, plasmids, phages and some organelles; these include two-piece tmRNAs from permuted genes', 'hint': 'tmRNA Website contains predicted tmRNA sequences from RefSeq bacterial genomes, plasmids, phages and some organelles', 'tags': ['automatic', 'tmRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS000060F5B3', 'taxid': 398580}, {'upi': 'URS000058C344', 'taxid': 1291358}, {'upi': 'URS000048A91D', 'taxid': 224911}, ], 'references': [ { 'title': 'The tmRNA website', 'authors': 'Hudson CM, Williams KP', 'journal': 'Nucleic Acids Res. 43(Database issue):D138-40. (2015 Jan)', 'pubmed_id': '25378311', }, { 'title': 'The tmRNA website: reductive evolution of tmRNA in plastids and other endosymbionts', 'authors': 'Gueneau de Novoa P., Williams KP.', 'journal': 'Nucleic Acids Res. 
32(Database issue): D104-8 (2004 Jan)', 'pubmed_id': '14681369', } ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'SRPDB', 'label': 'srpdb', 'url': 'https://rth.dk/resources/rnp/SRPDB', 'description': 'provides aligned, annotated and phylogenetically ordered sequences related to structure and function of SRP', 'hint': 'SRPDB provides aligned, annotated and phylogenetically ordered sequences related to structure and function of SRP', 'tags': ['curated', 'signal recognition particle'], 'abbreviation': 'Signal Recognition Particle Database', 'examples': [ {'upi': 'URS00000478B7', 'taxid': 9606}, {'upi': 'URS00001C03DC', 'taxid': 1423}, {'upi': 'URS00005C64FE', 'taxid': 216594}, ], 'references': [ { 'title': 'Kinship in the SRP RNA family', 'authors': 'Rosenblad MA., Larsen N., Samuelsson T., Zwieb C.', 'journal': 'RNA Biol 6(5): 508-516 (2009 Nov-Dec)', 'pubmed_id': '19838050', }, { 'title': 'The tmRDB and SRPDB resources', 'authors': 'Andersen ES., Rosenblad MA., Larsen N., Westergaard JC., Burks J., Wower IK., Wower J., Gorodkin J., Samuelsson T., Zwieb C.', 'journal': 'Nucleic Acids Res. 34(Database issue): D163-8 (2006 Jan)', 'pubmed_id': '16381838', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'lncRNAdb', 'label': 'lncrnadb', 'url': 'http://lncrnadb.org/', 'description': 'is a database providing comprehensive annotations of eukaryotic long non-coding RNAs (lncRNAs)', 'hint': 'lncRNAdb is a database providing comprehensive annotations of eukaryotic long non-coding RNAs (lncRNAs)', 'tags': ['curated', 'lncRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS00000478B7', 'taxid': 9606}, {'upi': 'URS00005E1511', 'taxid': 9606}, {'upi': 'URS0000147018', 'taxid': 10090}, ], 'references': [ { 'title': 'lncRNAdb: a reference database for long noncoding RNAs', 'authors': 'Amaral P.P., Clark M.B., Gascoigne D.K., Dinger M.E., Mattick J.S.', 'journal': 'Nucleic Acids Res. 39(Database issue):D146-D151(2011)', 'pubmed_id': '21112873', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'GtRNAdb', 'label': 'gtrnadb', 'url': 'http://gtrnadb.ucsc.edu/', 'description': 'contains tRNA gene predictions on complete or nearly complete genomes', 'hint': 'GtRNAdb contains tRNA gene predictions on complete or nearly complete genomes', 'tags': ['automatic', 'tRNA', 'secondary structure'], 'abbreviation': '', 'examples': [ {'upi': 'URS000047C79B', 'taxid': 9606}, {'upi': 'URS000074448D', 'taxid': 10090}, {'upi': 'URS00001F9D54', 'taxid': 10116}, ], 'references': [ { 'title': 'GtRNAdb 2.0: an expanded database of transfer RNA genes identified in complete and draft genomes', 'authors': 'Chan P.P., Lowe T.M.', 'journal': 'Nucleic Acids Res. 2016 Jan 4;44(D1):D184-9', 'pubmed_id': '26673694', }, { 'title': 'GtRNAdb: a database of transfer RNA genes detected in genomic sequence', 'authors': 'Chan P.P., Lowe T.M.', 'journal': 'Nucleic Acids Res. 
37(Database issue):D93-D97(2009)', 'pubmed_id': '18984615', }, ], 'imported': True, 'status': 'updated', 'version': 'release 19', }, { 'name': 'RefSeq', 'label': 'refseq', 'url': 'http://www.ncbi.nlm.nih.gov/refseq/', 'description': 'is a comprehensive, integrated, non-redundant, well-annotated set of reference sequences', 'hint': 'RefSeq is a comprehensive, integrated, non-redundant, well-annotated set of reference sequences', 'tags': ['curated', 'all ncRNA types'], 'abbreviation': 'NCBI Reference Sequence Database', 'examples': [ {'upi': 'URS000075A3E5', 'taxid': 10090}, {'upi': 'URS000075ADFF', 'taxid': 9606}, {'upi': 'URS00003A96B7', 'taxid': 192222}, ], 'references': [ { 'title': 'RefSeq: an update on mammalian reference sequences.', 'authors': 'Pruitt K.D., Brown G.R., Hiatt S.M., Thibaud-Nissen F., Astashyn A., Ermolaeva O., Farrell C.M., Hart J., Landrum M.J., McGarvey K.M. et al.', 'journal': 'Nucleic Acids Res. 2014 Jan;42(Database issue):D756-63', 'pubmed_id': '24259432', }, ], 'imported': True, 'status': 'updated', 'version': '208', # ftp://ftp.ncbi.nlm.nih.gov/refseq/release/RELEASE_NUMBER }, { 'name': 'RDP', 'label': 'rdp', 'url': 'http://rdp.cme.msu.edu/', 'description': 'provides quality-controlled, aligned and annotated rRNA sequences and a suite of analysis tools', 'hint': 'RDP provides quality-controlled, aligned and annotated rRNA sequences and a suite of analysis tools', 'tags': ['automatic', 'SSU rRNA'], 'abbreviation': 'Ribosomal Database Project', 'examples': [ {'upi': 'URS0000434740', 'taxid': 338963}, {'upi': 'URS000071C755', 'taxid': 224308}, {'upi': 'URS0000090853', 'taxid': 637905}, ], 'references': [ { 'title': 'Ribosomal Database Project: data and tools for high throughput rRNA analysis', 'authors': 'Cole J.R., Wang Q., Fish J.A., Chai B., McGarrell D.M., Sun Y., Brown C.T., Porras-Alfaro A., Kuske C.R., Tiedje J.M.', 'journal': 'Nucleic Acids Res. 2014 Jan;42(Database issue):D633-42', 'pubmed_id': '24288368', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'CRW', 'label': 'crw', 'url': 'http://crw-site.chemistry.gatech.edu/', 'description': 'provides comparative sequence and structure information for ribosomal, intron, and other RNAs', 'hint': 'CRW provides comparative sequence and structure information for ribosomal, intron, and other RNAs', 'tags': ['curated', 'SSU rRNA', '5S rRNA'], 'abbreviation': 'Comparative RNA Website', 'examples': [ {'upi': 'URS0001BCA6C0', 'taxid': 562}, # E.coli SSU {'upi': 'URS0001BCA4A9', 'taxid': 9606}, # Human SSU {'upi': 'URS0001BCA572', 'taxid': 9606}, # Human 5S ], 'references': [ { 'title': 'The comparative RNA web (CRW) site: an online database of comparative sequence and structure information for ribosomal, intron, and other RNAs', 'authors': 'Jamie J Cannone, Sankar Subramanian, Murray N Schnare, James R Collett, Lisa M DSouza, Yushi Du, Brian Feng, Nan Lin, Lakshmi V Madabusi, Kirsten M Muller, Nupur Pande, Zhidi Shang, Nan Yu, Robin R Gutell', 'journal': 'BMC Bioinformatics. 
2002;3:2', 'pubmed_id': '11869452', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'HGNC', 'label': 'hgnc', 'url': 'http://www.genenames.org/', 'description': 'is the worldwide authority that assigns standardised nomenclature to human genes', 'hint': 'HGNC is the worldwide authority that assigns standardised nomenclature to human genes', 'tags': ['curated', 'human', 'gene nomenclature'], 'abbreviation': 'HUGO Gene Nomenclature Committee', 'examples': [ {'upi': 'URS000075C808', 'taxid': 9606}, # HOTAIR {'upi': 'URS00004ACFCF', 'taxid': 9606}, # SNORA1 {'upi': 'URS000075CF56', 'taxid': 9606}, # MIRNA-1 ], 'references': [ { 'title': 'Genenames.org: the HGNC and VGNC resources in 2017.', 'authors': 'Yates B, Braschi B, Gray KA, Seal RL, Tweedie S, Bruford EA', 'journal': 'Nucleic Acids Res. 2017 Jan 4;45(D1):D619-D625', 'pubmed_id': '27799471', }, { 'title': 'Genenames.org: the HGNC resources in 2015', 'authors': 'Gray KA, Yates B, Seal RL, Wright MW, Bruford EA', 'journal': 'Nucleic Acids Res. 2015 Jan;43(Database issue):D1079-85', 'pubmed_id': '25361968', } ], 'imported': True, 'status': 'updated', 'version': 'as of 3 Sept 2021', }, { 'name': 'Greengenes', 'label': 'greengenes', 'url': 'http://greengenes.secondgenome.com/?prefix=downloads/greengenes_database/gg_13_5/', 'description': 'is a full-length 16S rRNA gene database that provides a curated taxonomy based on de novo tree inference', 'hint': 'Greengenes is a database of full-length 16S rRNA gene that provides a curated taxonomy based on de novo tree inference', 'tags': ['automatic', 'SSU rRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS000080E226', 'taxid': 274}, {'upi': 'URS00006DE01B', 'taxid': 575788}, {'upi': 'URS00004DD3DC', 'taxid': 511145}, ], 'references': [ { 'title': 'An improved Greengenes taxonomy with explicit ranks for ecological and evolutionary analyses of bacteria and archaea', 'authors': 'McDonald D, Price MN, Goodrich J, Nawrocki EP, DeSantis TZ, Probst A, Andersen GL, Knight R, Hugenholtz P', 'journal': 'ISME J. 
2012 Mar;6(3):610-8', 'pubmed_id': '22134646', }, ], 'imported': True, 'status': '', 'version': '13.5', }, { 'name': 'LncBase', 'label': 'lncbase', 'url': 'http://www.microrna.gr/LncBase', 'description': 'experimentally verified and computationally predicted microRNA targets on long non-coding RNAs', 'hint': 'LncBase provides experimentally verified and computationally predicted microRNA targets on long non-coding RNAs', 'tags': ['automatic', 'curated', 'experimentally determined', 'miRNA', 'lncRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS000075EAB0', 'taxid': 9606}, {'upi': 'URS00005A4DCF', 'taxid': 10090}, {'upi': 'URS00003B7674', 'taxid': 9606}, ], 'references': [ { 'title': 'DIANA-LncBase v2: indexing microRNA targets on non-coding transcripts', 'authors': 'Paraskevopoulou MD, Vlachos IS, Karagkouni D, Georgakilas G, Kanellos I, Vergoulis T, Zagganas K, Tsanakas P, Floros E, Dalamagas T, Hatzigeorgiou AG', 'journal': 'Nucleic Acids Res 44(d1):D231-8 (2016)', 'pubmed_id': '26612864', } ], 'imported': True, 'status': '', 'version': 'v2', }, { 'name': 'LNCipedia', 'label': 'lncipedia', 'url': 'http://www.lncipedia.org/', 'description': 'is a comprehensive compendium of human long non-coding RNAs', 'hint': 'LNCipedia is a comprehensive compendium of human long non-coding RNAs', 'tags': ['automatic', 'human', 'lncRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS000081175C', 'taxid': 9606}, {'upi': 'URS0000812103', 'taxid': 9606}, {'upi': 'URS00001F1863', 'taxid': 9606}, ], 'references': [ { 'title': 'An update on LNCipedia: a database for annotated human lncRNA sequences', 'authors': 'Volders PJ, Verheggen K, Menschaert G, Vandepoele K, Martens L, Vandesompele J, Mestdagh P', 'journal': 'Nucleic Acids Res. 2015 Jan;43(Database issue):D174-80', 'pubmed_id': '25378313', }, ], 'imported': True, 'status': '', 'version': '5.2', }, { 'name': 'Modomics', 'label': 'modomics', 'url': 'http://modomics.genesilico.pl/', 'description': 'is a comprehensive database of RNA modifications', 'hint': 'Modomics is a comprehensive database of RNA modifications', 'tags': ['curated', 'RNA modifications', 'tRNA', 'rRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS00001BBAFC', 'taxid': 562}, {'upi': 'URS000019192F', 'taxid': 2102}, {'upi': 'URS000026426D', 'taxid': 9031}, ], 'references': [ { 'title': 'MODOMICS: a database of RNA modification pathways. 2017 update', 'authors': 'Boccaletto P, Machnicka MA, Purta E, Piatkowski P, Baginski B, Wirecki TK, de Crecy-Lagard V, Ross R, Limbach PA, Kotter A, Helm M, Bujnicki JM', 'journal': 'Nucleic Acids Res. 2018 Jan 4;46(D1):D303-D307', 'pubmed_id': '29106616', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'NONCODE', 'label': 'noncode', 'url': 'http://www.noncode.org/', 'description': 'is an integrated knowledge database dedicated to non-coding RNAs', 'hint': 'NONCODE is an integrated knowledge database dedicated to non-coding RNAs', 'tags': ['automatic', 'curated', 'lncRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS000019B796', 'taxid': 9606}, {'upi': 'URS00008189E4', 'taxid': 9606}, {'upi': 'URS000058E3EB', 'taxid': 9606}, ], 'references': [ { 'title': 'NONCODE 2016: an informative and valuable data source of long non-coding RNAs', 'authors': 'Zhao Y, Li H, Fang S, Kang Y, Wu W, Hao Y, Li Z, Bu D, Sun N, Zhang MQ, Chen R', 'journal': 'Nucleic Acids Res. 
2016 Jan 4;44(D1):D203-8', 'pubmed_id': '26586799', }, ], 'imported': True, 'status': '', 'version': 'NONCODE2016', }, { 'name': 'NPInter', 'label': '', 'url': 'http://bioinfo.ibp.ac.cn/NPInter/', 'description': 'experimentally determined functional interactions between ncRNAs and proteins, mRNAs or genomic DNA', 'hint': 'NPInter contains data on experimentally determined functional interactions between ncRNAs and proteins, mRNAs or genomic DNA', 'tags': ['automatic', 'curated'], 'abbreviation': '', 'examples': '', 'references': [], 'imported': False, 'status': '', 'version': '', }, { 'name': 'piRBase', 'label': 'pirbase', 'url': 'http://www.regulatoryrna.org/database/piRNA/', 'description': 'a database of various piRNA associated data to support piRNA functional study ', 'hint': 'piRBase is a database of various piRNA associated data to support piRNA functional study', 'tags': ['automatic', 'curated', 'piRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS0000061ED0', 'taxid': 9606}, {'upi': 'URS0000298B36', 'taxid': 10090}, {'upi': 'URS00000FDFAF', 'taxid': 7227}, ], 'references': [ { 'title': 'piRBase: a comprehensive database of piRNA sequences', 'authors': 'Wang J, Zhang P, Lu Y, Li Y, Zheng Y, Kan Y, Chen R, He S', 'journal': 'Nucleic Acids Res. 2019 Jan 8;47(D1):D175-D180', 'pubmed_id': '30371818', }, ], 'imported': True, 'status': '', 'version': '2.0 (only sequences matching existing RNAcentral accessions have been imported)' }, { 'name': 'PLncDB', 'label': 'plncdb', 'url': 'http://chualab.rockefeller.edu/gbrowse2/homepage.html', 'description': 'provides comprehensive genomic view of Arabidopsis lncRNAs', 'hint': 'PLncDB provides comprehensive genomic view of Arabidopsis lncRNAs', 'tags': ['curated', 'Arabidopsis thaliana'], 'abbreviation': 'Plant Long Non-Coding DataBase', 'examples': [''], 'references': [ { 'title': 'PLncDB: plant long non-coding RNA database', 'authors': 'Jin J., Liu J., Wang H., Wong L., Chua N.H.', 'journal': 'Bioinformatics. 2013 Apr 15;29(8):1068-71', 'pubmed_id': '23476021', }, ], 'imported': False, 'status': '', 'version': '', }, { 'name': 'PomBase', 'label': 'pombase', 'url': 'http://www.pombase.org/', 'description': 'is a comprehensive database for the fission yeast Schizosaccharomyces pombe', 'hint': 'PomBase is a comprehensive database for the fission yeast Schizosaccharomyces pombe', 'tags': ['curated', 'model organism', 'yeast', 'Schizosaccharomyces pombe'], 'abbreviation': '', 'examples': [ {'upi': 'URS000044FEB9', 'taxid': 4896}, {'upi': 'URS00003F73E3', 'taxid': 4896}, {'upi': 'URS00002743E8', 'taxid': 4896}, ], 'references': [ { 'title': 'PomBase: a comprehensive online resource for fission yeast', 'authors': 'Wood V., Harris M.A., McDowall M.D., Rutherford K., Vaughan B.W., Staines D.M., Aslett M., Lock A., Bahler J., Kersey P.J., Oliver S.G.', 'journal': 'Nucleic Acids Res. 
2012 Jan;40(Database issue):D695-9', 'pubmed_id': '22039153', }, ], 'imported': True, 'status': 'updated', 'version': 'as of 02 Sept 2021', }, { 'name': 'RNApathwaysDB', 'label': '', 'url': 'http://genesilico.pl/rnapathwaysdb', 'description': 'RNA maturation and decay pathways', 'hint': 'RNApathwaysDB contains RNA maturation and decay pathways', 'tags': ['curated', 'pathways'], 'abbreviation': '', 'examples': '', 'references': [], 'imported': False, 'status': '', 'version': '', }, { 'name': 'SILVA', 'label': 'silva', 'url': 'https://www.arb-silva.de/', 'description': 'is a comprehensive resource for quality checked and aligned ribosomal RNA sequence data', 'hint': 'SILVA is a comprehensive resource for quality checked and aligned ribosomal RNA sequence data', 'tags': ['semi-automatic', 'SSU rRNA', 'LSU rRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS00005A14E2', 'taxid': 9606}, {'upi': 'URS00004DD3DC', 'taxid': 511145}, {'upi': 'URS0000224E47', 'taxid': 10090}, ], 'references': [ { 'title': 'The SILVA ribosomal RNA gene database project: improved data processing and web-based tools', 'authors': 'Quast C., Pruesse E., Yilmaz P., Gerken J., Schweer T., Yarza P., Peplies J., Glockner F.O.', 'journal': 'Nucleic Acids Res. 2013 Jan;41(Database issue):D590-6', 'pubmed_id': '23193283', }, ], 'imported': True, 'status': '', 'version': 'r138.1', }, { 'name': 'SGD', 'label': 'sgd', 'url': 'http://yeastgenome.org/', 'description': 'provides comprehensive integrated biological information for the budding yeast', 'hint': 'SGD provides comprehensive integrated biological information for the budding yeast', 'tags': ['curated', 'model organism', 'yeast', 'Saccharomyces'], 'abbreviation': 'Saccharomyces Genome Database', 'examples': [ {'upi': 'URS0000224E47', 'taxid': 559292}, # HRA1 gene {'upi': 'URS00001CAAE9', 'taxid': 559292}, # SRP {'upi': 'URS0000077671', 'taxid': 559292}, # snoRNA ], 'references': [ { 'title': 'Saccharomyces Genome Database: the genomics resource of budding yeast', 'authors': 'Cherry J.M., Hong E.L., Amundsen C., Balakrishnan R., Binkley G., Chan E.T., Christie K.R., Costanzo M.C., Dwight S.S., Engel S.R. et al.', 'journal': 'Nucleic Acids Res. 
2012 Jan;40(Database issue):D700-5', 'pubmed_id': '22110037', }, ], 'imported': True, 'status': 'updated', 'version': 'as of 03 Sept 2021', }, { 'name': 'snOPY', 'label': 'snopy', 'url': 'http://snoopy.med.miyazaki-u.ac.jp', 'description': "provides comprehensive information about snoRNAs, snoRNA gene loci, and target RNAs as well as information about snoRNA orthologues", 'hint': 'snOPY provides comprehensive information about snoRNAs, their gene loci, orthologues and their target RNAs', 'tags': ['curated', 'snoRNA'], 'abbreviation': 'snoRNA Orthological Gene Database', 'examples': [ {'upi': 'URS00004B0879', 'taxid': 3702}, {'upi': 'URS0000600DF1', 'taxid': 7227}, {'upi': 'URS000015A509', 'taxid': 7227}, ], 'references': [ { 'title': 'snOPY: a small nucleolar RNA orthological gene database', 'authors': 'Yoshihama M., Nakao A., Kenmochi N.', 'journal': 'BMC Res Notes 6:426-426(2013)', 'pubmed_id': '24148649', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'snoRNA Database', 'label': 'snornadb', 'url': 'http://lowelab.ucsc.edu/snoRNAdb/', 'description': 'is a curated collection of archaeal snoRNAs maintained by the Lowe Lab at UC Santa Cruz', 'hint': 'The snoRNA Database is a curated collection of archaeal snoRNAs maintained by the Lowe Lab at UC Santa Cruz', 'tags': ['automatic', 'curated', 'snoRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS0000600702', 'taxid': 340102}, {'upi': 'URS000020B9CF', 'taxid': 698757}, {'upi': 'URS00000A48A9', 'taxid': 698757}, ], 'references': [ { 'title': 'Homologs of small nucleolar RNAs in Archaea', 'authors': 'A D Omer, T M Lowe, A G Russell, H Ebhardt, S R Eddy, P P Dennis', 'journal': 'Science. 2000 Apr 21;288(5465):517-22', 'pubmed_id': '10775111', }, { 'title': 'Archaeal homologs of eukaryotic methylation guide small nucleolar RNAs: lessons from the Pyrococcus genomes', 'authors': 'C Gaspin, J Cavaille, G Erauso, J P Bachellerie', 'journal': 'J Mol Biol. 2000 Apr 7;297(4):895-906', 'pubmed_id': '10736225', }, { 'title': 'Methylation guide RNA evolution in archaea: structure, function and genomic organization of 110 C/D box sRNA families across six Pyrobaculum species', 'authors': 'Lauren M Lui, Andrew V Uzilov, David L Bernick, Andrea Corredor, Todd M Lowe, Patrick P Dennis', 'journal': 'Nucleic Acids Res. 2018 Jun 20;46(11):5678-5691', 'pubmed_id': '29771354', }, { 'title': 'Diversity of Antisense and Other Non-Coding RNAs in Archaea Revealed by Comparative Small RNA Sequencing in Four Pyrobaculum Species', 'authors': 'David L Bernick, Patrick P Dennis, Lauren M Lui, Todd M Lowe', 'journal': 'Front Microbiol. 2012 Jul 2;3:231', 'pubmed_id': '22783241', }, { 'title': 'Complete genome sequence of Pyrobaculum oguniense', 'authors': 'David L Bernick, Kevin Karplus, Lauren M Lui, Joanna K C Coker, Julie N Murphy, Patricia P Chan, Aaron E Cozen, Todd M Lowe', 'journal': 'Stand Genomic Sci. 
2012 Jul 30;6(3):336-45', 'pubmed_id': '23407329', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': 'sRNAmap', 'label': '', 'url': 'http://srnamap.mbc.nctu.edu.tw/', 'description': 'a collection of sRNA sequences and interactions', 'hint': 'sRNAmap is a collection of sRNA sequences and interactions', 'tags': ['curated', 'sRNA'], 'abbreviation': '', 'examples': '', 'references': [], 'imported': False, 'status': '', 'version': '', }, { 'name': 'TarBase', 'label': 'tarbase', 'url': 'http://www.microrna.gr/tarbase', 'description': 'a collection of manually curated experimentally validated miRNA-gene interactions', 'hint': 'TarBase is a collection of manually curated experimentally validated miRNA-gene interactions', 'tags': ['curated', 'miRNA', 'gene', 'interactions'], 'abbreviation': '', 'examples': [ {'upi': 'URS0000021B51', 'taxid': 10090}, {'upi': 'URS00001DC04F', 'taxid': 10090}, {'upi': 'URS00004BCD9C', 'taxid': 9606}, ], 'references': [ { 'title': 'DIANA-TarBase v8: a decade-long collection of experimentally supported miRNA-gene interactions', 'authors': 'Karagkouni D., Paraskevopoulou MD., Chatzopoulos S., Vlachos IS., Tastsoglou S., Kanellos I., Papadimitriou D., Kavakiotis I., Maniou S., Skoufos G., Vergoulis T., Dalamagas T., Hatzigeorgiou AG', 'journal': 'Nucleic Acids Res. 2018 Jan 4;46(D1):D239-D245', 'pubmed_id': '29156006', }, ], 'imported': True, 'status': '', 'version': 'v8', }, { 'name': 'tmRDB', 'label': '', 'url': 'http://rth.dk/resources/rnp/tmRDB/', 'description': 'aligned, annotated and phylogenetically ordered sequences related to structure and function of tmRNA', 'hint': 'tmRDB is a collection of aligned, annotated and phylogenetically ordered sequences related to structure and function of tmRNA', 'tags': ['curated', 'tmRNA'], 'abbreviation': '', 'examples': '', 'references': [], 'imported': False, 'status': '', 'version': '', }, { 'name': 'tRNAdb', 'label': '', 'url': 'http://trna.bioinf.uni-leipzig.de/DataOutput/', 'description': 'compilation of tRNA sequences and tRNA genes', 'hint': 'tRNAdb is a compilation of tRNA sequences and tRNA genes.', 'tags': ['curated', 'tRNA'], 'description': 'compilation of tRNA sequences and tRNA genes', 'abbreviation': '', 'examples': '', 'references': [], 'imported': False, 'status': '', 'version': '', }, { 'name': 'WormBase', 'label': 'wormbase', 'url': 'http://www.wormbase.org/', 'description': "curates, stores and displays genomic and genetic data about nematodes with primary emphasis on <em>C. elegans</em> and related nematodes", 'hint': 'WormBase curates, stores and displays genomic and genetic data about nematodes with primary emphasis on C. elegans and related nematodes', 'tags': ['curated', 'model organism', 'nematode', 'Caenorhabditis elegans'], 'abbreviation': '', 'examples': [ {'upi': 'URS000022A09E', 'taxid': 6239}, # miRNA {'upi': 'URS00001218EE', 'taxid': 6239}, # rRNA {'upi': 'URS00003E1CE3', 'taxid': 6239}, # snoRNA ], 'references': [ { 'title': 'WormBase 2012: more genomes, more data, new website', 'authors': 'Yook K., Harris TW., Bieri T., Cabunoc A., Chan J., Chen WJ., Davis P., de la Cruz N., Duong A., Fang R. et al.', 'journal': 'Nucleic Acids Res. 
2012 Jan;40(Database issue):D735-41', 'pubmed_id': '22067452', }, ], 'imported': True, 'status': 'updated', 'version': 'WS270', }, { 'name': 'MGI', 'label': 'mgi', 'url': 'http://www.informatics.jax.org/', 'description': 'is the international database resource for the laboratory mouse', 'hint': 'MGI is the international database resource for the laboratory mouse', 'tags': ['curated', 'model organism', 'mouse', 'Mus musculus'], 'abbreviation': '', 'examples': [ {'upi': 'URS00009AEDBB', 'taxid': 10090}, {'upi': 'URS00009B2FC2', 'taxid': 10090}, {'upi': 'URS00007742B3', 'taxid': 10090}, ], 'references': [ { 'title': 'Mouse Genome Database (MGD)-2017: community knowledge resource for the laboratory mouse', 'authors': 'Blake JA, Eppig JT, Kadin JA, Richardson JE, Smith CL, Bult CJ; the Mouse Genome Database Group', 'journal': 'Nucleic Acids Res. 2017 Jan 4;45(D1):D723-D729', 'pubmed_id': '27899570', } ], 'imported': True, 'status': '', 'version': 'MGI 6.10', }, { 'name': 'RGD', 'label': 'rgd', 'url': 'http://rgd.mcw.edu/', 'description': 'a collaborative effort between leading research institutions involved in rat genetic and genomic research', 'hint': 'RGD is a rat genetic and genomic research resource', 'tags': ['curated', 'model organism', 'rat', 'Rattus norvegicus'], 'abbreviation': 'Rat Genome Database', 'examples': [ {'upi': 'URS000075AA07', 'taxid': 10116}, # Miat {'upi': 'URS00004B2C76', 'taxid': 10116}, # 5.8S rRNA {'upi': 'URS000075C796', 'taxid': 10116}, # SRP ], 'references': [ { 'title': 'The Rat Genome Database 2015: genomic, phenotypic and environmental variations and disease', 'authors': 'Shimoyama M, De Pons J, Hayman GT, Laulederkind SJ, Liu W, Nigam R, Petri V, Smith JR, Tutaj M, Wang SJ, Worthey E, Dwinell M, Jacob H.', 'journal': 'Nucleic Acids Res. 2015 Jan 28;43(Database issue):D743-50', 'pubmed_id': '25355511', } ], 'imported': True, 'status': '', 'version': 'as of March 2018', }, { 'name': 'TAIR', 'label': 'tair', 'url': 'http://www.arabidopsis.org/', 'description': 'is a database of genetic and molecular biology data for the model higher plant Arabidopsis thaliana', 'hint': 'TAIR is a database of genetic and molecular biology data for the model higher plant Arabidopsis thaliana', 'tags': ['curated', 'model organism', 'Arabidopsis thaliana'], 'abbreviation': 'The Arabidopsis Information Resource', 'examples': [ {'upi': 'URS0000591E4F', 'taxid': 3702}, # tRNA {'upi': 'URS000008172F', 'taxid': 3702}, # rRNA {'upi': 'URS000035F1B7', 'taxid': 3702}, # snoRNA ], 'references': [ { 'title': 'The Arabidopsis Information Resource (TAIR): improved gene annotation and new tools', 'authors': 'Lamesch P., Berardini T.Z., Li D., Swarbreck D., Wilks C., Sasidharan R., Muller R., Dreher K., Alexander D.L., Garcia-Hernandez M., Karthikeyan A.S. et al.', 'journal': 'Nucleic Acids Res. 
2012 Jan;40(Database issue):D1202-10', 'pubmed_id': '22140109', }, ], 'imported': True, 'status': '', 'version': 'TAIR10', }, { 'name': 'dictyBase', 'label': 'dictybase', 'url': 'http://dictybase.org/', 'description': 'is the model organism database for the social amoeba Dictyostelium discoideum', 'hint': 'dictyBase is the model organism database for the social amoeba Dictyostelium discoideum', 'tags': ['curated', 'model organism', 'Dictyostelium discoideum'], 'abbreviation': '', 'examples': [ {'upi': 'URS00003BBB9E', 'taxid': 352472}, {'upi': 'URS0000235EB0', 'taxid': 352472}, {'upi': 'URS00004A9A20', 'taxid': 352472}, ], 'references': [ { 'title': 'DictyBase 2013: integrating multiple Dictyostelid species', 'authors': 'Basu S, Fey P, Pandit Y, Dodson R, Kibbe WA, Chisholm RL', 'journal': 'Nucleic Acids Res. 2013 Jan;41(Database issue):D676-83', 'pubmed_id': '23172289', }, ], 'imported': True, 'status': '', 'version': '', }, { 'name': '5SrRNAdb', 'label': '5srrnadb', 'url': 'http://combio.pl/rrna/', 'description': 'is an information resource for 5S ribosomal RNAs', 'hint': '5SrRNAdb is an information resource for 5S ribosomal RNAs', 'tags': ['curated', '5S', 'rRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS000002B0D5', 'taxid': 9606}, {'upi': 'URS000002B0D5', 'taxid': 10090}, ], 'references': [ { 'title': '5SRNAdb: an information resource for 5S ribosomal RNAs', 'authors': 'Szymanski M, Zielezinski A, Barciszewski J, Erdmann VA, Karlowski WM', 'journal': 'Nucleic Acids Res. 2015 Oct 20. pii: gkv1081', 'pubmed_id': '26490961', }, ], 'imported': True, 'status': '', 'version': '17', }, { 'name': 'miRTarBase', 'label': 'mirtarbase', 'url': 'http://mirtarbase.mbc.nctu.edu.tw', 'description': 'is an experimentally validated microRNA-target interactions database', 'hint': 'miRTarBase is an experimentally validated microRNA-target interactions database', 'tags': ['curated', 'experimentally determined', 'miRNA', 'interactions'], 'abbreviation': '', 'examples': [], 'references': [ { 'title': 'miRTarBase 2016: updates to the experimentally validated miRNA-target interactions database', 'authors': 'Chou et al', 'journal': 'Nucleic Acids Res. 2016 Jan 4;44(D1):D239-47', 'pubmed_id': '26590260', }, ], 'imported': False, 'status': '', 'version': '', }, { 'name': 'LncBook', 'label': 'lncbook', 'url': 'http://bigd.big.ac.cn/lncbook', 'description': 'is a curated knowledgebase of human long non-coding RNAs', 'hint': 'LncBook is a curated knowledgebase of human long non-coding RNAs', 'tags': ['community curated', 'human', 'lncRNA'], 'abbreviation': '', 'examples': [ {'upi': 'URS00003E9E7E', 'taxid': 9606}, {'upi': 'URS000075E1E7', 'taxid': 9606}, {'upi': 'URS0000050347', 'taxid': 9606}, ], 'references': [ { 'title': 'LncBook: a curated knowledgebase of human long non-coding RNAs', 'authors': 'Ma L, Cao J, Liu L, Du Q, Li Z, Zou D, Bajic VB, Zhang Z', 'journal': 'Nucleic Acids Res. 
2019 Jan 8;47(D1):D128-D134', 'pubmed_id': '30329098', }, ], 'imported': True, 'status': '', 'version': '1.0', }, { 'name': 'ZWD', 'label': 'zwd', 'url': 'https://bitbucket.org/zashaw/zashaweinbergdata', 'description': 'is a git-based collection of non-coding RNA alignments maintained by Dr Zasha Weinberg', 'hint': 'ZWD is a git-based collection of non-coding RNA alignments maintained by Dr Zasha Weinberg', 'tags': ['metagenome', 'predicted', 'riboswitch'], 'abbreviation': '', 'examples': [ {'upi': 'URS000065A032', 'taxid': 224308}, {'upi': 'URS000067336D', 'taxid': 264730}, {'upi': 'URS0000D66279', 'taxid': 997891}, ], 'references': [ { 'title': 'Detection of 224 candidate structured RNAs by comparative analysis of specific subsets of intergenic regions', 'authors': 'Weinberg Z, Lunse CE, Corbino KA, Ames TD, Nelson JW, Roth A, Perkins KR, Sherlock ME, Breaker RR', 'journal': 'Nucleic Acids Res. 2017 Oct 13;45(18):10811-10823', 'pubmed_id': '28977401', }, ], 'imported': True, 'status': 'updated', 'version': '1.2', }, { 'name': 'snoDB', 'label': 'snodb', 'url': 'http://scottgroup.med.usherbrooke.ca/snoDB/', 'description': 'is an interactive database of human snoRNA sequences, abundance and interactions', 'hint': 'snoDB is an interactive database of human snoRNA sequences, abundance and interactions', 'tags': ['snoRNA', 'curated', 'human'], 'abbreviation': '', 'examples': [ {'upi': 'URS000071F072', 'taxid': 9606}, {'upi': 'URS0000726F61', 'taxid': 9606}, {'upi': 'URS00005D7632', 'taxid': 9606}, ], 'references': [ { 'title': 'snoDB: an interactive database of human snoRNA sequences, abundance and interactions', 'authors': 'Bouchard-Bourelle P, Desjardins-Henri C, Mathurin-St-Pierre D, Deschamps-Francoeur G, Fafard-Couture E, Garant JM, Elela SA, Scott MS', 'journal': 'Nucleic Acids Res. 2019 Oct 10. pii: gkz884', 'pubmed_id': '31598696', }, ], 'imported': True, 'status': '', 'version': '1.1.0', }, { 'name': 'MirGeneDB', 'label': 'mirgenedb', 'url': 'https://mirgenedb.org', 'description': 'is a curated microRNA gene database covering 45 metazoan organisms', 'hint': 'MirGeneDB is a curated microRNA gene database covering 45 metazoan organisms', 'tags': ['miRNA', 'curated'], 'abbreviation': '', 'examples': [ {'upi': 'URS00000157F5', 'taxid': 9606}, {'upi': 'URS000075DE8D', 'taxid': 10090}, {'upi': 'URS0000416056', 'taxid': 7955}, ], 'references': [ { 'title': 'MirGeneDB 2.0: the metazoan microRNA complement', 'authors': 'Fromm B, Domanska D, Hoye E, Ovchinnikov V, Kang W, Aparicio-Puerta E, Johansen M, Flatmark K, Mathelier A, Hovig E, Hackenberg M, Friedlander MR, Peterson KJ', 'journal': 'Nucleic Acids Res. 
2019 Oct 23', 'pubmed_id': '31642479', }, ], 'imported': True, 'status': '', 'version': '2.0', }, { 'name': 'MalaCards', 'label': 'malacards', 'url': 'https://www.malacards.org/', 'description': 'integrates manually-curated and text-mining sources to associate genes, including ncRNAs, with diseases, and lists the supporting evidence', 'hint': 'MalaCards integrates manually-curated and text-mining sources to associate genes, including ncRNAs, with diseases, and lists the supporting evidence', 'tags': ['disease', 'human'], 'abbreviation': '', 'examples': [ {'upi': 'URS0000EBFCE3', 'taxid': 9606}, {'upi': 'URS0000EBF55E', 'taxid': 9606}, {'upi': 'URS0000EBF67F', 'taxid': 9606}, ], 'references': [ { 'title': 'MalaCards: an amalgamated human disease compendium with diverse clinical and genetic annotation and structured search', 'authors': 'Rappaport N, Twik M, Plaschkes I, Nudel R, Iny Stein T, Levitt J, Gershoni M, Morrey CP, Safran M, Lancet D', 'journal': 'Nucleic Acids Res. 2017 Jan 4;45(D1):D877-D887', 'pubmed_id': '27899610', }, ], 'imported': True, 'status': 'updated', 'version': '5.5', }, { 'name': 'GeneCards', 'label': 'genecards', 'url': 'https://www.genecards.org/', 'description': 'is a searchable, integrative database that provides comprehensive, user-friendly information on all annotated and predicted human genes', 'hint': 'GeneCards is a searchable, integrative database that provides comprehensive, user-friendly information on all annotated and predicted human genes', 'tags': ['human', 'RNA gene'], 'abbreviation': '', 'examples': [ {'upi': 'URS0000EBFCE3', 'taxid': 9606}, {'upi': 'URS0000EBF55E', 'taxid': 9606}, {'upi': 'URS0000EBF67F', 'taxid': 9606}, ], 'references': [ { 'title': 'The GeneCards Suite: From Gene Data Mining to Disease Genome Sequence Analyses', 'authors': 'Stelzer G, Rosen N, Plaschkes I, Zimmerman S, Twik M, Fishilevich S, Stein T, Nudel R, Lieder I, Mazor Y, Kaplan S, Dahary D, Warshawsky D, Guan-Golan Y, Kohn A, Rappaport N, Safran M, Lancet D', 'journal': 'Curr Protoc Bioinformatics. 2016 Jun 20;54:1.30.1-1.30.33', 'pubmed_id': '27322403', }, ], 'imported': True, 'status': 'updated', 'version': '5.5', }, { 'name': 'CRS', 'label': 'crs', 'url': 'https://rth.dk/resources/rnannotator/crs/vert/', 'description': 'is a database of conserved RNA motifs identified computationally in multi-species vertebrate alignments using 2D structure', 'hint': 'CRS is a database of conserved RNA motifs identified computationally in multi-species vertebrate alignments using 2D structure', 'tags': ['predicted'], 'abbreviation': '', 'examples': [ {'upi': 'URS0000759F81', 'taxid': 9606}, {'upi': 'URS0000A7D0F3', 'taxid': 10090}, ], 'references': [ { 'title': 'The identification and functional annotation of RNA structures conserved in vertebrates', 'authors': 'Seemann SE, Mirza AH, Hansen C, Bang-Berthelsen CH, Garde C, Christensen-Dalsgaard M, Torarinsson E, Yao Z, Workman CT, Pociot F, Nielsen H, Tommerup N, Ruzzo WL, Gorodkin J', 'journal': 'Genome Res. 2017 Aug;27(8):1371-1383', 'pubmed_id': '28487280', }, ], 'imported': True, 'status': '', 'version': '2.1', }, { 'name': 'IntAct', 'label': 'intact', 'url': 'https://www.ebi.ac.uk/intact', 'description': 'provides a freely available, open source database system and analysis tools for molecular interaction data. All interactions are derived from literature curation or direct user submissions', 'hint': 'IntAct provides a freely available, open source database system and analysis tools for molecular interaction data. 
All interactions are derived from literature curation or direct user submissions', 'tags': ['curated', 'interaction', 'RNA-protein'], 'abbreviation': '', 'examples': [ {'upi': 'URS000075DAEC', 'taxid': 9606}, # human NEAT1 {'upi': 'URS0000723DBB', 'taxid': 10090}, # mouse miRNA {'upi': 'URS00002BC0C6', 'taxid': 559292}, # yeast snoRNA ], 'references': [ { 'title': 'The MIntAct project--IntAct as a common curation platform for 11 molecular interaction databases', 'authors': 'Orchard S, Ammari M, Aranda B, Breuza L, Briganti L, Broackes-Carter F, Campbell NH, Chavali G, Chen C, del-Toro N et al.', 'journal': 'Nucleic Acids Res. 2014 Jan;42(Database issue):D358-63', 'pubmed_id': '24234451', }, ], 'imported': True, 'status': 'updated', 'version': 'as of 03 Sept 2021', }, { 'name': 'ZFIN', 'label': 'zfin', 'url': 'https://zfin.org', 'description': 'is the database of genetic and genomic data for the zebrafish (Danio rerio) as a model organism', 'hint': 'The Zebrafish Information Network (ZFIN) is the database of genetic and genomic data for the zebrafish (Danio rerio) as a model organism', 'tags': ['curated', 'model organism', 'zebrafish'], 'abbreviation': 'The Zebrafish Information Network', 'examples': [ {'upi': 'URS00003B6A21', 'taxid': 7955}, # mir196c {'upi': 'URS00008E3972', 'taxid': 7955}, # linc.alien {'upi': 'URS0000A8261D', 'taxid': 7955}, # dre-let-7a-1 ], 'references': [ { 'title': 'The Zebrafish Information Network: new support for non-coding genes, richer Gene Ontology annotations and the Alliance of Genome Resources', 'authors': 'Leyla Ruzicka, Douglas G Howe, Sridhar Ramachandran, Sabrina Toro, Ceri E Van Slyke, Yvonne M Bradford, Anne Eagle, David Fashena, Ken Frazer, Patrick Kalita, Prita Mani, Ryan Martin, Sierra Taylor Moxon, Holly Paddock, Christian Pich, Kevin Schaper, Xiang Shao, Amy Singer, Monte Westerfield', 'journal': 'Nucleic Acids Res. 2019 Jan 8;47(D1):D867-D873', 'pubmed_id': '30407545', }, ], 'imported': True, 'status': '', 'version': 'as of 22 April 2021', }, { 'name': 'snoRNA Atlas', 'label': 'snoatlas', 'url': 'http://snoatlas.bioinf.uni-leipzig.de/', 'description': '', 'hint': 'snoRNA Atlas is a database of human snoRNAs', 'tags': ['', '', ''], 'abbreviation': '', 'examples': [], 'references': [], 'imported': False, 'status': '', 'version': '', }, { 'name': 'PSICQUIC', 'label': 'psicquic', 'url': 'http://www.ebi.ac.uk/Tools/webservices/psicquic/view/home.xhtml', 'description': 'provides computational access to molecular-interaction data. miRNA annotations are a collaboration between the UCL functional gene annotation team and the UniProt-GOA group at the EBI and is funded by the British Heart Foundation', 'hint': 'A database of manually annotated human miRNA interactions', 'tags': ['curated', '', ''], 'abbreviation': 'PSICQUIC', 'examples': [ {'upi': 'URS00005A4DCF', 'taxid': 9606}, # hsa-miR-125a-5p {'upi': 'URS00005BBC98', 'taxid': 9606}, # hsa-mir-183 precursor {'upi': 'URS0000D54CAD', 'taxid': 9606}, # hsa-miR-155-5p ], 'references': [ { 'title': 'A new reference implementation of the PSICQUIC web service', 'authors': 'Noemi del-Toro, Marine Dumousseau, Sandra Orchard, Rafael C Jimenez, Eugenia Galeota, Guillaume Launay, Johannes Goll, Karin Breuer, Keiichiro Ono, Lukasz Salwinski, Henning Hermjakob', 'journal': 'Nucleic Acids Res. 2013 Jul;41(Web Server issue):W601-6', 'pubmed_id': '23671334', } ], 'imported': True, 'status': 'New', 'version': 'as of 03 Sept 2021', } ]
RNAcentral/rnacentral-webcode
rnacentral/portal/config/expert_databases.py
Python
apache-2.0
65,637
[ "Brian" ]
5a43a21e8a1786171d6208d6d8a7a70c799977f14ab4701130497eafa9f3e7db
""" Tests for discussion pages """ from uuid import uuid4 import pytest from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc # lint-amnesty, pylint: disable=unused-import from common.test.acceptance.fixtures.discussion import ( Comment, Response, SingleThreadViewFixture, Thread, ) from common.test.acceptance.pages.common.auto_auth import AutoAuthPage from common.test.acceptance.pages.lms.discussion import ( DiscussionTabHomePage, DiscussionTabSingleThreadPage, ) from common.test.acceptance.tests.discussion.helpers import BaseDiscussionMixin, BaseDiscussionTestCase from common.test.acceptance.tests.helpers import UniqueCourseTest from openedx.core.lib.tests import attr THREAD_CONTENT_WITH_LATEX = u"""Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt # lint-amnesty, pylint: disable=line-too-long ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. \n\n----------\n\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. (b).\n\n **(a)** $H_1(e^{j\\omega}) = \\sum_{n=-\\infty}^{\\infty}h_1[n]e^{-j\\omega n} = \\sum_{n=-\\infty} ^{\\infty}h[n]e^{-j\\omega n}+\\delta_2e^{-j\\omega n_0}$ $= H(e^{j\\omega})+\\delta_2e^{-j\\omega n_0}=A_e (e^{j\\omega}) e^{-j\\omega n_0} +\\delta_2e^{-j\\omega n_0}=e^{-j\\omega n_0} (A_e(e^{j\\omega})+\\delta_2) $H_3(e^{j\\omega})=A_e(e^{j\\omega})+\\delta_2$. Dummy $A_e(e^{j\\omega})$ dummy post $. $A_e(e^{j\\omega}) \\ge -\\delta_2$, it follows that $H_3(e^{j\\omega})$ is real and $H_3(e^{j\\omega})\\ge 0$.\n\n**(b)** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur.\n\n **Case 1:** If $re^{j\\theta}$ is a Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. \n\n**Case 3:** Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. Lorem $H_3(e^{j\\omega}) = P(cos\\omega)(cos\\omega - cos\\theta)^k$, Lorem Lorem Lorem Lorem Lorem Lorem $P(cos\\omega)$ has no $(cos\\omega - cos\\theta)$ factor. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. 
Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. $P(cos\\theta) \\neq 0$. Since $P(cos\\omega)$ this is a dummy data post $\\omega$, dummy $\\delta > 0$ such that for all $\\omega$ dummy $|\\omega - \\theta| < \\delta$, $P(cos\\omega)$ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit sse cillum dolore eu fugiat nulla pariatur. """ @attr(shard=2) class DiscussionHomePageTest(BaseDiscussionTestCase): """ Tests for the discussion home page. 
""" SEARCHED_USERNAME = "gizmo" def setUp(self): super(DiscussionHomePageTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments AutoAuthPage(self.browser, course_id=self.course_id).visit() self.page = DiscussionTabHomePage(self.browser, self.course_id) self.page.visit() @attr('a11y') def test_page_accessibility(self): self.page.a11y_audit.config.set_rules({ "ignore": [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865 'region' # TODO: AC-932 ] }) self.page.a11y_audit.check_for_accessibility_errors() class DiscussionTabMultipleThreadTest(BaseDiscussionTestCase, BaseDiscussionMixin): """ Tests for the discussion page with multiple threads """ def setUp(self): super(DiscussionTabMultipleThreadTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments AutoAuthPage(self.browser, course_id=self.course_id).visit() self.thread_count = 2 self.thread_ids = [] self.setup_multiple_threads(thread_count=self.thread_count) self.thread_page_1 = DiscussionTabSingleThreadPage( self.browser, self.course_id, self.discussion_id, self.thread_ids[0] ) self.thread_page_2 = DiscussionTabSingleThreadPage( self.browser, self.course_id, self.discussion_id, self.thread_ids[1] ) self.thread_page_1.visit() @attr('a11y') def test_page_accessibility(self): self.thread_page_1.a11y_audit.config.set_rules({ "ignore": [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865 'region', # TODO: AC-932 ] }) self.thread_page_1.a11y_audit.check_for_accessibility_errors() self.thread_page_2.a11y_audit.config.set_rules({ "ignore": [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'region' # TODO: AC-932 ] }) self.thread_page_2.a11y_audit.check_for_accessibility_errors() class DiscussionOpenClosedThreadTest(BaseDiscussionTestCase): """ Tests for checking the display of attributes on open and closed threads """ def setUp(self): super(DiscussionOpenClosedThreadTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments self.thread_id = "test_thread_{}".format(uuid4().hex) def setup_user(self, roles=[]): # lint-amnesty, pylint: disable=dangerous-default-value roles_str = ','.join(roles) self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id() # lint-amnesty, pylint: disable=attribute-defined-outside-init def setup_view(self, **thread_kwargs): # lint-amnesty, pylint: disable=missing-function-docstring thread_kwargs.update({'commentable_id': self.discussion_id}) view = SingleThreadViewFixture( Thread(id=self.thread_id, **thread_kwargs) ) view.addResponse(Response(id="response1")) view.push() def setup_openclosed_thread_page(self, closed=False): # lint-amnesty, pylint: disable=missing-function-docstring self.setup_user(roles=['Moderator']) if closed: self.setup_view(closed=True) else: self.setup_view() page = self.create_single_thread_page(self.thread_id) page.visit() page.close_open_thread() return page @attr('a11y') def test_page_accessibility(self): page = self.setup_openclosed_thread_page() page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'color-contrast', # Commented out for now because they reproducibly fail on Jenkins but not locally 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865 'region', # TODO: AC-932 ] }) page.a11y_audit.check_for_accessibility_errors() page = self.setup_openclosed_thread_page(True) 
page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'color-contrast', # Commented out for now because they reproducibly fail on Jenkins but not locally 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865 'region', # TODO: AC-932 ] }) page.a11y_audit.check_for_accessibility_errors() class DiscussionResponseEditTest(BaseDiscussionTestCase): """ Tests for editing responses displayed beneath thread in the single thread view. """ def setup_user(self, roles=[]): # lint-amnesty, pylint: disable=dangerous-default-value roles_str = ','.join(roles) self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id() # lint-amnesty, pylint: disable=attribute-defined-outside-init def setup_view(self): # lint-amnesty, pylint: disable=missing-function-docstring view = SingleThreadViewFixture(Thread(id="response_edit_test_thread", commentable_id=self.discussion_id)) view.addResponse( Response(id="response_other_author", user_id="other", thread_id="response_edit_test_thread"), ) view.addResponse( Response(id="response_self_author", user_id=self.user_id, thread_id="response_edit_test_thread"), ) view.push() @attr('a11y') def test_page_accessibility(self): self.setup_user() self.setup_view() page = self.create_single_thread_page("response_edit_test_thread") page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865 'region', # TODO: AC-932 ] }) page.visit() page.a11y_audit.check_for_accessibility_errors() class DiscussionCommentEditTest(BaseDiscussionTestCase): """ Tests for editing comments displayed beneath responses in the single thread view. """ def setup_user(self, roles=[]): # lint-amnesty, pylint: disable=dangerous-default-value roles_str = ','.join(roles) self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id() # lint-amnesty, pylint: disable=attribute-defined-outside-init def setup_view(self): # lint-amnesty, pylint: disable=missing-function-docstring view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread", commentable_id=self.discussion_id)) view.addResponse( Response(id="response1"), [Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)]) # lint-amnesty, pylint: disable=line-too-long view.push() @attr('a11y') def test_page_accessibility(self): self.setup_user() self.setup_view() page = self.create_single_thread_page("comment_edit_test_thread") page.visit() page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865 'region', # TODO: AC-932 ] }) page.a11y_audit.check_for_accessibility_errors() @attr('a11y') @pytest.mark.skip(reason='This test is too flaky to run at all. TNL-6215') def test_inline_a11y(self): """ Tests Inline Discussion for accessibility issues. 
""" self.setup_multiple_threads(thread_count=3) # First test the a11y of the expanded list of threads self.discussion_page.expand_discussion() self.discussion_page.a11y_audit.config.set_rules({ 'ignore': [ 'section' ] }) self.discussion_page.a11y_audit.check_for_accessibility_errors() # Now show the first thread and test the a11y again self.discussion_page.show_thread(self.thread_ids[0]) self.discussion_page.a11y_audit.check_for_accessibility_errors() # Finally show the new post form and test its a11y self.discussion_page.click_new_post_button() self.discussion_page.a11y_audit.check_for_accessibility_errors() class DiscussionSearchAlertTest(UniqueCourseTest): """ Tests for spawning and dismissing alerts related to user search actions and their results. """ SEARCHED_USERNAME = "gizmo" def setUp(self): super(DiscussionSearchAlertTest, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments CourseFixture(**self.course_info).install() # first auto auth call sets up a user that we will search for in some tests self.searched_user_id = AutoAuthPage( self.browser, username=self.SEARCHED_USERNAME, course_id=self.course_id ).visit().get_user_id() # this auto auth call creates the actual session user AutoAuthPage(self.browser, course_id=self.course_id).visit() self.page = DiscussionTabHomePage(self.browser, self.course_id) self.page.visit() @attr('a11y') def test_page_accessibility(self): self.page.a11y_audit.config.set_rules({ 'ignore': [ 'section', # TODO: AC-491 'aria-required-children', # TODO: AC-534 'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865 'region', # TODO: AC-932 ] }) self.page.a11y_audit.check_for_accessibility_errors()
stvstnfrd/edx-platform
common/test/acceptance/tests/discussion/test_discussion.py
Python
agpl-3.0
18,013
[ "VisIt" ]
23b4043c0a3280cdfbd74ca6fd562bb9cdc3482daa40a6bcfd3cab41edee2f9b
import logging import sys import pytest import rasterio from rasterio.profiles import default_gtiff_profile from rasterio.vfs import parse_path, vsi_path logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) def test_parse_path_with_vfs(): """Correctly parse path with legacy vfs parameter""" assert parse_path('foo.tif', vfs='zip://tests/data/files.zip') == ( 'foo.tif', 'tests/data/files.zip', 'zip') def test_parse_path_zip(): """Correctly parse VFS scheme URL""" assert parse_path('zip://tests/data/files.zip!foo.tif') == ( 'foo.tif', 'tests/data/files.zip', 'zip') def test_parse_path_file_scheme(): """Correctly parse file:// URL""" assert parse_path('file://foo.tif') == ( 'foo.tif', None, 'file') def test_parse_path_file(): """Correctly parse an ordinary filesystem path""" assert parse_path('/foo.tif') == ( '/foo.tif', None, '') def test_parse_netcdf(): """Annoying URI-like GDAL dataset names fall through properly""" assert parse_path('NETCDF:filepath:varname') == ( 'NETCDF:filepath:varname', None, None) def test_vsi_path_scheme(): """Correctly make a vsi path""" assert vsi_path( 'foo.tif', 'tests/data/files.zip', 'zip') == '/vsizip/tests/data/files.zip/foo.tif' def test_vsi_path_file(): """Correctly make and ordinary file path from a file path""" assert vsi_path( 'foo.tif', None, 'file') == 'foo.tif' def test_read_vfs_zip(): with rasterio.open( 'zip://tests/data/files.zip!/RGB.byte.tif') as src: assert src.name == 'zip://tests/data/files.zip!/RGB.byte.tif' assert src.count == 3 def test_read_vfs_file(): with rasterio.open( 'file://tests/data/RGB.byte.tif') as src: assert src.name == 'file://tests/data/RGB.byte.tif' assert src.count == 3 def test_read_vfs_zip_cmp_array(): with rasterio.open( 'zip://tests/data/files.zip!/RGB.byte.tif') as src: zip_arr = src.read() with rasterio.open( 'file://tests/data/RGB.byte.tif') as src: file_arr = src.read() assert zip_arr.dumps() == file_arr.dumps() def test_read_vfs_none(): with rasterio.open( 'tests/data/RGB.byte.tif') as src: assert src.name == 'tests/data/RGB.byte.tif' assert src.count == 3 @pytest.mark.parametrize('mode', ['r+', 'w']) def test_update_vfs(tmpdir, mode): """VFS datasets can not be created or updated""" profile = default_gtiff_profile.copy() profile.update(count=1, width=1, height=1) with pytest.raises(TypeError): rasterio.open('zip://{0}'.format(tmpdir), mode, **profile)
brendan-ward/rasterio
tests/test_vfs.py
Python
bsd-3-clause
2,689
[ "NetCDF" ]
0c96c2925457940c70c0f92103bfc38fe0f0830958af37905acd4c1ded0edeb7
import datetime import time import requests # Last updated: 5/31/2013 HEROES = dict( abaddon=102, alchemist=73, ancient_apparition=68, antimage=1, axe=2, bane=3, batrider=65, beastmaster=38, bloodseeker=4, bounty_hunter=62, brewmaster=78, bristleback=99, broodmother=61, centaur=96, chaos_knight=81, chen=66, clinkz=56, crystal_maiden=5, dark_seer=55, dazzle=50, death_prophet=43, disruptor=87, doom_bringer=69, dragon_knight=49, drow_ranger=6, earthshaker=7, elder_titan=103, enchantress=58, enigma=33, faceless_void=41, furion=53, gyrocopter=72, huskar=59, invoker=74, jakiro=64, juggernaut=8, keeper_of_the_light=90, kunkka=23, legion_commander=104, leshrac=52, lich=31, life_stealer=54, lina=25, lion=26, lone_druid=80, luna=48, lycan=77, magnataur=97, medusa=94, meepo=82, mirana=9, morphling=10, naga_siren=89, necrolyte=36, nevermore=11, night_stalker=60, nyx_assassin=88, obsidian_destroyer=76, ogre_magi=84, omniknight=57, phantom_assassin=44, phantom_lancer=12, puck=13, pudge=14, pugna=45, queenofpain=39, rattletrap=51, razor=15, riki=32, rubick=86, sand_king=16, shadow_demon=79, shadow_shaman=27, shredder=98, silencer=75, skeleton_king=42, skywrath_mage=101, slardar=28, slark=93, sniper=35, spectre=67, spirit_breaker=71, storm_spirit=17, sven=18, templar_assassin=46, tidehunter=29, tinker=34, tiny=19, treant=83, troll_warlord=95, tusk=100, undying=85, ursa=70, vengefulspirit=20, venomancer=40, viper=47, visage=92, warlock=37, weaver=63, windrunner=21, wisp=91, witch_doctor=30, zuus=22, ) MATCH_SKILL_LEVELS = ( 0, # Any 1, # Normal 2, # High 3, # Very high ) MATCH_GAME_MODES = ( 1, # All Pick 2, # Captains Mode 3, # Random Draft 4, # Single Draft 5, # All Random 6, # ?? INTRO/DEATH ?? 7, # The Diretide 8, # Reverse Captains Mode 9, # Greeviling 10, # Tutorial 11, # Mid Only 12, # Least Played 13, # New Player Poo ) class Hero(object): images_url = 'http://media.steampowered.com/apps/dota2/images/heroes/{}' def __init__(self, **kwargs): for n, v in kwargs.items(): setattr(self, n, v) def __repr__(self): return getattr(self, 'localized_name', self.name) @property def full_image(self): return self.images_url.format(self.name[14:] + '_full.png') @property def thumbnail_image(self): return self.images_url.format(self.name[14:] + '_sb.png') class Dota2APIError(Exception): pass class Dota2API(object): base_url = 'https://api.steampowered.com' api_key = None def __init__(self, api_key, base_url=None): self.api_key = api_key if base_url: self.base_url = base_url def __request(self, method, path, **kwargs): url = self.base_url + path if not self.api_key: raise AttributeError('api_key not yet set') kwargs.setdefault('params', dict()).update(key=self.api_key) return requests.request(method, url, **kwargs).json() def __to_timestamp(self, date): if type(date) == datetime.datetime: date = time.mktime(date.timetuple()) return int(date) def get_steam_id(self, vanity_name, **params): path = '/ISteamUser/ResolveVanityURL/v0001' params.update( vanityurl=vanity_name, ) response = self.__request('get', path, params=params).get('response') if response and response['success']: return response['steamid'] def get_player_summaries(self, steam_ids, **params): if type(steam_ids) not in (str, unicode): steam_ids = ','.join(map(str, steam_ids)) path = '/ISteamUser/GetPlayerSummaries/v0002' params.update( steamids=steam_ids, ) return (self.__request('get', path, params=params) .get('response', {}) .get('players', [])) def get_heroes(self, **params): """Get up to date list of heroes""" path = '/IEconDOTA2_570/GetHeroes/v0001' 
params.setdefault('language', 'en_us') heroes = (self.__request('get', path, params=params) .get('result', {}) .get('heroes', [])) for hero_attrs in heroes: yield Hero(**hero_attrs) def get_match_history(self, player_name=None, hero_id=None, game_mode=None, skill=0, date_min=None, date_max=None, min_players=None, account_id=None, league_id=None, start_at_match_id=None, matches_requested=25, tournament_games_only=None, **params): if hero_id: if type(hero_id) == str: hero_id = HEROES[hero_id] hero_id = int(hero_id) if hero_id not in HEROES.values(): raise ValueError('Invalid hero id: %r' % hero_id) if game_mode: game_mode = int(game_mode) if game_mode not in MATCH_GAME_MODES: raise ValueError('Invalid match game mode: %r' % game_mode) if skill: skill = int(skill) if skill not in MATCH_SKILL_LEVELS: raise ValueError('Invalid match skill level: %r' % skill) if date_min: date_min = self.__to_timestamp(date_min) if date_max: date_max = self.__to_timestamp(date_max) matches_requested = int(matches_requested) if matches_requested > 25: req_count, last_req = divmod(matches_requested, 25) if last_req > 0: req_count += 1 matches_requested = 25 else: req_count, last_req = 1, matches_requested path = '/IDOTA2Match_570/GetMatchHistory/v001' params.update( player_name=player_name, hero_id=hero_id, game_mode=game_mode, skill=skill, date_min=date_min, date_max=date_max, min_players=min_players, account_id=account_id, league_id=league_id, start_at_match_id=start_at_match_id, matches_requested=matches_requested, tournament_games_only=tournament_games_only, ) matches = [] for i in range(req_count): if i + 1 == req_count and last_req > 0: params.update(matches_requested=last_req) response = self.__request('get', path, params=params) if response['result']['status'] != 1: raise Dota2APIError(response['result']['statusDetail']) curr_matches = response['result']['matches'] if len(curr_matches) > 0: params.update( start_at_match_id=curr_matches[-1]['match_id'] - 1, ) matches.extend(curr_matches) if response['result']['results_remaining'] < 1: break response['result'].update( matches=matches, num_results=len(matches), ) return response['result'] def get_match_details(self, match_id, **params): path = '/IDOTA2Match_570/GetMatchDetails/v001' params.update( match_id=match_id, ) return self.__request('get', path, params=params).get('result') def get_league_listing(self): path = '/IDOTA2Match_570/GetLeagueListing/v001' return (self.__request('get', path) .get('result', {}) .get('leagues', [])) def get_live_league_games(self): path = '/IDOTA2Match_570/GetLiveLeagueGames/v001' return (self.__request('get', path) .get('result', {}) .get('games', []))
bryanveloso/dota2.py
dota2.py
Python
mit
8,184
[ "TINKER" ]
8566022181527539e7bafe181ca76003f0fff661cd43874a134d6137e9eb0b57
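A minimal usage sketch for the Dota2API wrapper in the record above. The API key, vanity name, and hero choice are placeholders (not part of the original file), and live access to the Steam Web API is assumed.

from dota2 import Dota2API, HEROES

api = Dota2API(api_key="YOUR_STEAM_API_KEY")         # hypothetical key

# Resolve a profile's vanity URL name to a 64-bit Steam id.
steam_id = api.get_steam_id("some_vanity_name")      # hypothetical name

# Fetch up to 50 recent matches played as Pudge; more than 25 triggers paging internally.
history = api.get_match_history(hero_id=HEROES["pudge"], matches_requested=50)
for match in history["matches"]:
    details = api.get_match_details(match["match_id"])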
######################################################################## # File : LocalComputingElement.py # Author : Ricardo Graciani, A.T. ######################################################################## """ LocalComputingElement is a class to handle non-grid computing clusters """ import os import stat import shutil import tempfile import getpass from urlparse import urlparse from DIRAC import S_OK, S_ERROR from DIRAC import gConfig from DIRAC.Resources.Computing.ComputingElement import ComputingElement from DIRAC.Resources.Computing.PilotBundle import bundleProxy, writeScript from DIRAC.Core.Utilities.List import uniqueElements from DIRAC.Core.Utilities.File import makeGuid from DIRAC.Core.Utilities.Subprocess import systemCall class LocalComputingElement( ComputingElement ): ############################################################################# def __init__( self, ceUniqueID ): """ Standard constructor. """ ComputingElement.__init__( self, ceUniqueID ) self.ceType = '' self.execution = "Local" self.batchSystem = self.ceParameters.get( 'BatchSystem', 'Host' ) self.batchModuleFile = None self.submittedJobs = 0 self.userName = getpass.getuser() def _reset( self ): """ Process CE parameters and make necessary adjustments """ self.batchSystem = self.ceParameters.get( 'BatchSystem', 'Host' ) self.loadBatchSystem() self.queue = self.ceParameters['Queue'] if 'ExecQueue' not in self.ceParameters or not self.ceParameters['ExecQueue']: self.ceParameters['ExecQueue'] = self.ceParameters.get( 'Queue', '' ) self.execQueue = self.ceParameters['ExecQueue'] self.log.info( "Using queue: ", self.queue ) self.sharedArea = self.ceParameters['SharedArea'] self.batchOutput = self.ceParameters['BatchOutput'] if not self.batchOutput.startswith( '/' ): self.batchOutput = os.path.join( self.sharedArea, self.batchOutput ) self.batchError = self.ceParameters['BatchError'] if not self.batchError.startswith( '/' ): self.batchError = os.path.join( self.sharedArea, self.batchError ) self.infoArea = self.ceParameters['InfoArea'] if not self.infoArea.startswith( '/' ): self.infoArea = os.path.join( self.sharedArea, self.infoArea ) self.executableArea = self.ceParameters['ExecutableArea'] if not self.executableArea.startswith( '/' ): self.executableArea = os.path.join( self.sharedArea, self.executableArea ) self.workArea = self.ceParameters['WorkArea'] if not self.workArea.startswith( '/' ): self.workArea = os.path.join( self.sharedArea, self.workArea ) result = self._prepareHost() if not result['OK']: return result self.submitOptions = '' if 'SubmitOptions' in self.ceParameters: self.submitOptions = self.ceParameters['SubmitOptions'] self.removeOutput = True if 'RemoveOutput' in self.ceParameters: if self.ceParameters['RemoveOutput'].lower() in ['no', 'false', '0']: self.removeOutput = False return S_OK() ############################################################################# def _addCEConfigDefaults( self ): """Method to make sure all necessary Configuration Parameters are defined """ # First assure that any global parameters are loaded ComputingElement._addCEConfigDefaults( self ) # Now batch system specific ones if 'ExecQueue' not in self.ceParameters: self.ceParameters['ExecQueue'] = self.ceParameters.get( 'Queue', '' ) if 'SharedArea' not in self.ceParameters: defaultPath = os.environ.get( 'HOME', '.' 
) self.ceParameters['SharedArea'] = gConfig.getValue( '/LocalSite/InstancePath', defaultPath ) if 'BatchOutput' not in self.ceParameters: self.ceParameters['BatchOutput'] = 'data' if 'BatchError' not in self.ceParameters: self.ceParameters['BatchError'] = 'data' if 'ExecutableArea' not in self.ceParameters: self.ceParameters['ExecutableArea'] = 'data' if 'InfoArea' not in self.ceParameters: self.ceParameters['InfoArea'] = 'info' if 'WorkArea' not in self.ceParameters: self.ceParameters['WorkArea'] = 'work' def _prepareHost( self ): """ Prepare directories and copy control script """ # Make remote directories dirTuple = uniqueElements( [ self.sharedArea, self.executableArea, self.infoArea, self.batchOutput, self.batchError, self.workArea] ) cmdTuple = [ 'mkdir', '-p' ] + dirTuple self.log.verbose( 'Creating working directories' ) result = systemCall( 30, cmdTuple ) if not result['OK']: self.log.warn( 'Failed creating working directories: %s' % result['Message'][1] ) return result status, output, _error = result['Value'] if status != 0: self.log.warn( 'Failed to create directories: %s' % output ) return S_ERROR( 'Failed to create directories: %s' % output ) return S_OK() def submitJob( self, executableFile, proxy = None, numberOfJobs = 1 ): if not os.access( executableFile, 5 ): os.chmod( executableFile, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH ) # if no proxy is supplied, the executable can be submitted directly # otherwise a wrapper script is needed to get the proxy to the execution node # The wrapper script makes debugging more complicated and thus it is # recommended to transfer a proxy inside the executable if possible. if proxy: self.log.verbose( 'Setting up proxy for payload' ) wrapperContent = bundleProxy( executableFile, proxy ) name = writeScript( wrapperContent, os.getcwd() ) submitFile = name else: # no proxy submitFile = executableFile jobStamps = [] for _i in range( numberOfJobs ): jobStamps.append( makeGuid()[:8] ) batchDict = { 'Executable': submitFile, 'NJobs': numberOfJobs, 'OutputDir': self.batchOutput, 'ErrorDir': self.batchError, 'SubmitOptions': self.submitOptions, 'ExecutionContext': self.execution, 'JobStamps': jobStamps } resultSubmit = self.batch.submitJob( **batchDict ) if proxy: os.remove( submitFile ) if resultSubmit['Status'] == 0: self.submittedJobs += len( resultSubmit['Jobs'] ) # jobIDs = [ self.ceType.lower()+'://'+self.ceName+'/'+_id for _id in resultSubmit['Jobs'] ] # FIXME: It would be more proper to fix pilotCommands.__setFlavour where 'ssh' is hardcoded than # making this illogical fix, but there is no good way for pilotCommands to know its origin ceType. # So, the jobIDs here need to start with 'ssh', not ceType, to accomodate them to those hardcoded in pilotCommands.__setFlavour jobIDs = [ 'ssh'+self.batchSystem.lower()+'://'+self.ceName+'/'+_id for _id in resultSubmit['Jobs'] ] result = S_OK( jobIDs ) else: result = S_ERROR( resultSubmit['Message'] ) return result def killJob( self, jobIDList ): """ Kill a bunch of jobs """ batchDict = { 'JobIDList': jobIDList } resultKill = self.batch.killJob( **batchDict ) if resultKill['Status'] == 0: return S_OK() else: return S_ERROR( resultKill['Message'] ) def getCEStatus( self ): """ Method to return information on running and pending jobs. 
""" result = S_OK() result['SubmittedJobs'] = self.submittedJobs result['RunningJobs'] = 0 result['WaitingJobs'] = 0 batchDict = { 'User': self.userName } resultGet = self.batch.getCEStatus( **batchDict ) if resultGet['Status'] == 0: result['RunningJobs'] = resultGet.get( 'Running', 0 ) result['WaitingJobs'] = resultGet.get( 'Waiting', 0 ) else: result = S_ERROR( resultGet['Message'] ) self.log.verbose( 'Waiting Jobs: ', result['WaitingJobs'] ) self.log.verbose( 'Running Jobs: ', result['RunningJobs'] ) return result def getJobStatus( self, jobIDList ): """ Get the status information for the given list of jobs """ stampList = [] for job in jobIDList: stamp = os.path.basename( urlparse( job ).path ) stampList.append(stamp) batchDict = { 'JobIDList': stampList, 'User': self.userName } resultGet = self.batch.getJobStatus( **batchDict ) if resultGet['Status'] == 0: result = S_OK( resultGet['Jobs'] ) else: result = S_ERROR( resultGet['Message'] ) return result def getJobOutput( self, jobID, localDir = None ): """ Get the specified job standard output and error files. If the localDir is provided, the output is returned as file in this directory. Otherwise, the output is returned as strings. """ result = self._getJobOutputFiles( jobID ) if not result['OK']: return result jobStamp, _host, outputFile, errorFile = result['Value'] self.log.verbose( 'Getting output for jobID %s' % jobID ) if not localDir: tempDir = tempfile.mkdtemp() else: tempDir = localDir try: localOut = os.path.join( tempDir, '%s.out' % jobStamp ) localErr = os.path.join( tempDir, '%s.err' % jobStamp ) if os.path.exists( outputFile ): shutil.copy( outputFile, localOut ) if os.path.exists( errorFile ): shutil.copy( errorFile, localErr ) except Exception as x: return S_ERROR( 'Failed to get output files: %s' % str( x ) ) open( localOut, 'a' ).close() open( localErr, 'a' ).close() # The result is OK, we can remove the output if self.removeOutput and os.path.exists( outputFile ): os.remove( outputFile ) if self.removeOutput and os.path.exists( errorFile ): os.remove( errorFile ) if localDir: return S_OK( ( localOut, localErr ) ) else: # Return the output as a string outputFile = open( localOut, 'r' ) output = outputFile.read() outputFile.close() outputFile = open( localErr, 'r' ) error = outputFile.read() outputFile.close() shutil.rmtree( tempDir ) return S_OK( ( output, error ) ) def _getJobOutputFiles( self, jobID ): """ Get output file names for the specific CE """ jobStamp = os.path.basename( urlparse( jobID ).path ) host = urlparse( jobID ).hostname if hasattr( self.batch, 'getOutputFiles' ): output, error = self.batch.getOutputFiles( jobStamp, self.batchOutput, self.batchError ) else: output = '%s/%s.out' % ( self.batchOutput, jobStamp ) error = '%s/%s.out' % ( self.batchError, jobStamp ) return S_OK( ( jobStamp, host, output, error ) ) # EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
hgiemza/DIRAC
Resources/Computing/LocalComputingElement.py
Python
gpl-3.0
11,212
[ "DIRAC" ]
d56b20506918a68dadc529e9176533743327e319aee042b4124fadcdb7f8c9ce
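A small sketch of the pilot job-ID convention used above: submitJob() returns IDs of the form ssh<batchsystem>://<ce-name>/<stamp>, and _getJobOutputFiles() recovers the stamp and host with urlparse. The CE host, stamp, and output directory below are made-up values.

import os
from urlparse import urlparse   # Python 2, as in the module above (urllib.parse on Python 3)

job_id = 'sshhost://my-cluster.example.org/1a2b3c4d'   # hypothetical job ID for BatchSystem=Host
parsed = urlparse(job_id)
job_stamp = os.path.basename(parsed.path)   # '1a2b3c4d'
host = parsed.hostname                      # 'my-cluster.example.org'

# With the default file naming, this job's standard output is read back from
# <BatchOutput>/<stamp>.out:
output_file = '%s/%s.out' % ('/home/dirac/data', job_stamp)   # directory is a placeholder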
'''
Dts_Stripper.py
Copyright (c) 2004 - 2006 James Urquhart(j_urquhart@btinternet.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''

import bpy

from .Torque_Util import *
import math
import copy

from .QADTriStripper import *

'''
Generic Triangle Stripper Interface
'''

use_stripper = "QAD"
# use_stripper = "VTK"
# use_stripper = "NVIDIA"


class Stripper:
    def __init__(self):
        self.clear()

    def __del__(self):
        del self.faces
        del self.strips

    def strip(self):
        self.strips = []

    def clear(self):
        self.strips = []
        self.faces = []


from .Stripper_VTK import *
# from Stripper_NVIDIA import *


def chooseStripper():
    global use_stripper
    if use_stripper == "VTK" and vtk != None:
        return VTKStripper()
    elif use_stripper == "QAD":
        return QADTriStripper(Stripper.maxStripSize)
    # elif use_stripper == "NVIDIA": return NVIDIAStripper()
    else:
        return None
pchan126/Blender_DTS_30
DTSPython/Dts_Stripper.py
Python
mit
1,942
[ "VTK" ]
a3c8e1b0b6d0acb2f001b3f2a666aef57c0ab78218b7af8d51204e9649c72629
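A toy illustration of the Stripper base interface defined above, runnable only inside Blender since the module imports bpy. FakeStripper is made up for this example and simply turns each input triangle into its own strip; the real QAD/VTK strippers merge triangles that share edges.

from Dts_Stripper import Stripper   # assumes the DTSPython package is importable

class FakeStripper(Stripper):
    def strip(self):
        # one "strip" per face; a real stripper would join adjacent faces
        self.strips = [list(face) for face in self.faces]

s = FakeStripper()
s.faces = [(0, 1, 2), (1, 2, 3)]    # hypothetical triangle indices
s.strip()
print(s.strips)                     # [[0, 1, 2], [1, 2, 3]]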
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Handles function calls, by generating compiled function names and calls. Note: this transformer does not rename the top level object being converted; that is the caller's responsibility. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import namedtuple import gast from tensorflow.python.autograph.core import converter from tensorflow.python.autograph.pyct import anno from tensorflow.python.autograph.pyct import ast_util from tensorflow.python.autograph.pyct import inspect_utils from tensorflow.python.autograph.pyct import parser from tensorflow.python.autograph.pyct import templates from tensorflow.python.util import tf_inspect class FunctionInfo(namedtuple('FunctionInfo', ('dtype',))): pass # TODO(mdan): Move this to config.py. KNOWN_NUMPY_FUNCTIONS = { ('numpy', 'random', 'binomial'): FunctionInfo(dtype='tf.int64'), } # TODO(mdan): Get rid of these interfaces. Can now depend directly on Namer. class FunctionNamer(object): """Describes the interface for CallTreeTransformer's namer.""" def compiled_function_name(self, original_fqn, live_entity=None, owner_type=None): """Generate the name corresponding to the compiled version of a function. Args: original_fqn: string or tuple(string) live_entity: Callable, the actual target function, if known. owner_type: Optional object. If present, it indicates that the function is a member of the given type. Returns: string, bool """ raise NotImplementedError() def compiled_class_name(self, original_fqn, live_entity=None): """Generate the name corresponding to the compiled version of a class. Args: original_fqn: string or tuple(string) live_entity: The actual target class, if known. Returns: string """ raise NotImplementedError() # TODO(mdan): Rename to CallsTransformer. class CallTreeTransformer(converter.Base): """Transforms the call tree by renaming transformed symbols.""" def _resolve_name(self, node): """Used to resolve decorator info.""" if isinstance(node, gast.Call): return self._resolve_name(node.func) if isinstance(node, gast.Name): return self.ctx.namespace.get(node.id) if isinstance(node, gast.Attribute): parent = self._resolve_name(node.value) if parent is not None: return getattr(parent, node.attr) return None raise ValueError(node) def _try_resolve_target(self, node): """Works for methods of objects of known type.""" if anno.hasanno(node, 'live_val'): return anno.getanno(node, 'live_val') if isinstance(node, gast.Attribute) and anno.hasanno(node, 'type'): owner_type = anno.getanno(node, 'type') if hasattr(owner_type, node.attr): return getattr(owner_type, node.attr) else: raise ValueError('Type "%s" has not attribute "%s". Is it dynamic?' 
% (owner_type, node.attr)) return None def _function_is_compilable(self, target_entity): """Determines whether an entity can be compiled at all.""" # TODO(mdan): This is just a placeholder. Implement. return not inspect_utils.isbuiltin(target_entity) def _should_compile(self, node, fqn): """Determines whether an entity should be compiled in the context.""" # TODO(mdan): Needs cleanup. We should remove the use of fqn altogether. module_name = fqn[0] for mod in self.ctx.program.uncompiled_modules: if module_name.startswith(mod[0] + '.'): return False for i in range(1, len(fqn)): if fqn[:i] in self.ctx.program.uncompiled_modules: return False # Check for local decorations if anno.hasanno(node, 'graph_ready'): return False # The decorators themselves are not to be converted. # If present, the decorators should appear as static functions. target_entity = self._try_resolve_target(node.func) if target_entity is not None: # This may be reached when "calling" a callable attribute of an object. # For example: # # self.fc = tf.keras.layers.Dense() # self.fc() # for mod in self.ctx.program.uncompiled_modules: if target_entity.__module__.startswith(mod[0] + '.'): return False # This attribute is set by the decorator itself. # TODO(mdan): This may not play nicely with other wrapping decorators. if hasattr(target_entity, '__pyct_is_compile_decorator'): return False if target_entity in self.ctx.program.options.strip_decorators: return False # Inspect the target function decorators. If any include a @convert # or @graph_ready annotation, then they must be called as they are. # TODO(mdan): This may be quite heavy. # To parse and re-analyze each function for every call site could be quite # wasteful. Maybe we could cache the parsed AST? try: target_node, _ = parser.parse_entity(target_entity) target_node = target_node.body[0] except TypeError: # Functions whose source we cannot access are compilable (e.g. wrapped # to py_func). return True for dec in target_node.decorator_list: decorator_fn = self._resolve_name(dec) if (decorator_fn is not None and decorator_fn in self.ctx.program.options.strip_decorators): return False return True def _rename_compilable_function(self, node): assert anno.hasanno(node.func, 'live_val') assert anno.hasanno(node.func, 'fqn') target_entity = anno.getanno(node.func, 'live_val') target_fqn = anno.getanno(node.func, 'fqn') if not self._should_compile(node, target_fqn): return node if anno.hasanno(node, 'is_constructor'): new_name = self.ctx.namer.compiled_class_name( target_fqn, live_entity=target_entity) do_rename = True else: if anno.hasanno(node.func, 'parent_type'): owner_type = anno.getanno(node.func, 'parent_type') else: # Fallback - not reliable. owner_type = inspect_utils.getmethodclass(target_entity) new_name, do_rename = self.ctx.namer.compiled_function_name( target_fqn, live_entity=target_entity, owner_type=owner_type) if do_rename: if target_entity is not None: if tf_inspect.ismethod(target_entity): # The renaming process will transform it into a regular function. # TODO(mdan): Is this complete? How does it work with nested members? node.args = [node.func.value] + node.args node.func = templates.replace('func_name', func_name=new_name)[0] return node def _wrap_to_py_func_no_return(self, node): # TODO(mdan): Properly handle varargs, etc. 
template = """ ag__.utils.wrap_py_func(func, None, (args,), kwargs, True) """ return templates.replace( template, func=node.func, args=node.args, kwargs=ast_util.keywords_to_dict(node.keywords)) def _wrap_to_py_func_single_return(self, node, dtype): # TODO(mdan): Properly handle varargs, etc. template = """ ag__.utils.wrap_py_func(func, dtype, (args,), kwargs, False) """ return templates.replace_as_expression( template, func=node.func, dtype=parser.parse_expression(dtype), args=node.args, kwargs=ast_util.keywords_to_dict(node.keywords)) def _insert_dynamic_conversion(self, node): """Inlines a dynamic conversion for a dynamic function.""" # TODO(mdan): Pass information on the statically compiled functions. # Having access to the statically compiled functions can help avoid # unnecessary compilation. # For example, this would lead to function `a` being compiled twice: # # def a(): # v = b # b() # def b(): # a() # # This is really a problem with recursive calls, which currently can # only be gated by a static condition, and should be rare. # TODO(mdan): It probably makes sense to use dynamic conversion every time. # Before we could convert all the time though, we'd need a reasonable # caching mechanism. template = """ ag__.converted_call(func, owner, options, args) """ if isinstance(node.func, gast.Attribute): func = gast.Str(node.func.attr) owner = node.func.value else: func = node.func owner = parser.parse_expression('None') call_expr = templates.replace( template, func=func, owner=owner, options=self.ctx.program.options.to_ast(self.ctx.info.namespace), args=node.args) new_call = call_expr[0].value # TODO(mdan): Improve the template mechanism to better support this. new_call.keywords = node.keywords return new_call def visit_Expr(self, node): if isinstance(node.value, gast.Call): if anno.hasanno(node.value.func, 'live_val'): target_entity = anno.getanno(node.value.func, 'live_val') if not self._function_is_compilable(target_entity): if anno.hasanno(node.value.func, 'fqn'): target_fqn = anno.getanno(node.value.func, 'fqn') if not self._should_compile(node.value, target_fqn): return node node = self._wrap_to_py_func_no_return(node.value) return node # Only the case of py_func with no return value is special. # Everything else is processed by visit_Call. self.visit(node.value) else: self.generic_visit(node) return node def visit_Call(self, node): # If the function call is wrapped by one of the marker decorators, # consider it graph ready. if anno.hasanno(node.func, 'live_val'): target_entity = anno.getanno(node.func, 'live_val') if target_entity in self.ctx.program.options.strip_decorators: if len(node.args) < 1: raise ValueError( 'Found call to decorator function "%s", but it had no arguments. ' 'A decorator needs at least one positional argument.' % target_entity) anno.setanno(node.args[0], 'graph_ready', True) self.generic_visit(node) if anno.hasanno(node.func, 'live_val'): target_entity = anno.getanno(node.func, 'live_val') if anno.hasanno(node.func, 'fqn'): target_fqn = anno.getanno(node.func, 'fqn') else: target_fqn = None if self._function_is_compilable(target_entity): node = self._rename_compilable_function(node) elif target_fqn and target_fqn in KNOWN_NUMPY_FUNCTIONS: # TODO(mdan): Should we replace these with equivalent TF ops instead? 
node = self._wrap_to_py_func_single_return( node, KNOWN_NUMPY_FUNCTIONS[target_fqn].dtype) else: raise NotImplementedError( 'py_func with return values (unknown function)') else: if anno.hasanno(node.func, anno.Basic.QN): # Special-case a few builtins that otherwise go undetected. This # normally doesn't pose a problem, but the dict built-in doesn't # work with inspect.getargspec which is required for dynamic functions. # Note: expecting this is resilient to aliasing (e.g. # dict = an_evil_dict), because in those cases the regular mechanisms # process a simple user function. qn = anno.getanno(node.func, anno.Basic.QN) # Add items to this list as needed. if str(qn) in ('dict',): return node if ast_util.matches(node, 'super(_)'): # super() calls are preserved. The class conversion mechanism will # ensure that they return the correct value. return node if self.ctx.program.options.recursive: node = self._insert_dynamic_conversion(node) return node def transform(node, ctx): """Transform function call to the compiled counterparts. Args: node: AST ctx: EntityContext Returns: A tuple (node, new_names): node: The transformed AST new_names: set(string), containing any newly-generated names """ return CallTreeTransformer(ctx).visit(node)
girving/tensorflow
tensorflow/python/autograph/converters/call_trees.py
Python
apache-2.0
12,990
[ "VisIt" ]
cac5c3e914efd46fac8776418ca62d9c54264e0988d995e3c2d62d9cb88badc4
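A standalone, much-simplified analogue of what CallTreeTransformer does above, using only the standard-library ast module rather than gast or the AutoGraph pipeline: a NodeTransformer that renames selected call targets in a parsed tree. The mapping and source string are invented for the example.

import ast

class RenameCalls(ast.NodeTransformer):
    def __init__(self, mapping):
        self.mapping = mapping                     # e.g. {'f': 'converted_f'}

    def visit_Call(self, node):
        self.generic_visit(node)
        if isinstance(node.func, ast.Name) and node.func.id in self.mapping:
            new_name = ast.Name(id=self.mapping[node.func.id], ctx=ast.Load())
            node.func = ast.copy_location(new_name, node.func)
        return node

tree = ast.parse("y = f(x) + g(x)")
tree = ast.fix_missing_locations(RenameCalls({"f": "converted_f"}).visit(tree))
# ast.unparse(tree) on Python 3.9+ gives: y = converted_f(x) + g(x)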
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ Syntenic path assembly. """ import sys import logging from collections import defaultdict from itertools import groupby, combinations from more_itertools import pairwise from jcvi.formats.blast import BlastSlow, Blast from jcvi.formats.sizes import Sizes from jcvi.formats.base import LineFile, must_open from jcvi.utils.range import range_intersect from jcvi.algorithms.graph import BiGraph from jcvi.apps.base import OptionParser, ActionDispatcher class OVLLine: def __init__(self, row): # tig00000004 tig00042923 I -64039 -18713 16592 99.84 # See also: assembly.goldenpath.Overlap for another implementation args = row.split() self.a = args[0] self.b = args[1] self.bstrand = "+" if args[2] == "N" else "-" self.ahang = int(args[3]) self.bhang = int(args[4]) self.overlap = int(args[5]) self.pctid = float(args[6]) self.score = int(self.overlap * self.pctid / 100) self.best = None @property def tag(self): if self.ahang >= 0: t = "a->b" if self.bhang > 0 else "b in a" elif self.ahang < 0: t = "b->a" if self.bhang < 0 else "a in b" return t class OVL(LineFile): def __init__(self, filename): super(OVL, self).__init__(filename) fp = must_open(filename) contained = set() alledges = defaultdict(list) for row in fp: o = OVLLine(row) self.append(o) if o.tag == "a in b": contained.add(o.a) elif o.tag == "b in a": contained.add(o.b) if o.tag == "a->b": alledges[o.a + "-3`"].append(o) elif o.tag == "b->a": alledges[o.a + "-5`"].append(o) logging.debug( "Imported {} links. Contained tigs: {}".format(len(self), len(contained)) ) self.contained = contained logging.debug("Pruning edges to keep the mutual best") for k, v in alledges.items(): bo = max(v, key=lambda x: x.score) bo.best = True self.graph = BiGraph() for o in self: if not o.best: continue if o.tag == "a->b": a, b = o.a, o.b elif o.tag == "b->a": a, b = o.b, o.a if a in contained or b in contained: continue bstrand = "<" if o.bstrand == "-" else ">" self.graph.add_edge(a, b, ">", bstrand, length=o.score) def main(): actions = ( ("bed", "convert ANCHORS file to BED format"), ("fromblast", "Generate path from BLAST file"), ("fromovl", "build overlap graph from AMOS overlaps"), ("happy", "Make graph from happy mapping data"), ("partition", "Make individual graphs partitioned by happy mapping"), ("merge", "Merge multiple graphs together and visualize"), ("connect", "connect contigs using long reads"), ) p = ActionDispatcher(actions) p.dispatch(globals()) def fromovl(args): """ %prog graph nucmer2ovl.ovl fastafile Build overlap graph from ovl file which is converted using NUCMER2OVL. """ p = OptionParser(fromovl.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) ovlfile, fastafile = args ovl = OVL(ovlfile) g = ovl.graph fw = open("contained.ids", "w") print("\n".join(sorted(ovl.contained)), file=fw) graph_to_agp(g, ovlfile, fastafile, exclude=ovl.contained, verbose=False) def bed(args): """ %prog bed anchorsfile Convert ANCHORS file to BED format. 
""" from collections import defaultdict from jcvi.compara.synteny import AnchorFile, check_beds from jcvi.formats.bed import Bed from jcvi.formats.base import get_number p = OptionParser(bed.__doc__) p.add_option( "--switch", default=False, action="store_true", help="Switch reference and aligned map elements", ) p.add_option( "--scale", type="float", help="Scale the aligned map distance by factor" ) p.set_beds() p.set_outfile() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (anchorsfile,) = args switch = opts.switch scale = opts.scale ac = AnchorFile(anchorsfile) pairs = defaultdict(list) for a, b, block_id in ac.iter_pairs(): pairs[a].append(b) qbed, sbed, qorder, sorder, is_self = check_beds(anchorsfile, p, opts) bd = Bed() for q in qbed: qseqid, qstart, qend, qaccn = q.seqid, q.start, q.end, q.accn if qaccn not in pairs: continue for s in pairs[qaccn]: si, s = sorder[s] sseqid, sstart, send, saccn = s.seqid, s.start, s.end, s.accn if switch: qseqid, sseqid = sseqid, qseqid qstart, sstart = sstart, qstart qend, send = send, qend qaccn, saccn = saccn, qaccn if scale: sstart /= scale try: newsseqid = get_number(sseqid) except ValueError: raise ValueError( "`{0}` is on `{1}` with no number to extract".format(saccn, sseqid) ) bedline = "\t".join( str(x) for x in (qseqid, qstart - 1, qend, "{0}:{1}".format(newsseqid, sstart)) ) bd.add(bedline) bd.print_to_file(filename=opts.outfile, sorted=True) def happy_nodes(row, prefix=None): row = row.translate(None, "[](){}+-") scfs = [x.strip() for x in row.split(":")] if prefix: scfs = [prefix + x for x in scfs] return scfs def happy_edges(row, prefix=None): """ Convert a row in HAPPY file and yield edges. """ trans = str.maketrans("[](){}", " ") row = row.strip().strip("+") row = row.translate(trans) scfs = [x.strip("+") for x in row.split(":")] for a, b in pairwise(scfs): oa = "<" if a.strip()[0] == "-" else ">" ob = "<" if b.strip()[0] == "-" else ">" is_uncertain = a[-1] == " " or b[0] == " " a = a.strip().strip("-") b = b.strip().strip("-") if prefix: a = prefix + a b = prefix + b yield (a, b, oa, ob), is_uncertain def partition(args): """ %prog partition happy.txt synteny.graph Select edges from another graph and merge it with the certain edges built from the HAPPY mapping data. 
""" allowed_format = ("png", "ps") p = OptionParser(partition.__doc__) p.add_option("--prefix", help="Add prefix to the name") p.add_option( "--namestart", default=0, type="int", help="Use a shorter name, starting index", ) p.add_option( "--format", default="png", choices=allowed_format, help="Generate image of format", ) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) happyfile, graphfile = args bg = BiGraph() bg.read(graphfile, color="red") prefix = opts.prefix fp = open(happyfile) for i, row in enumerate(fp): nns = happy_nodes(row, prefix=prefix) nodes = set(nns) edges = happy_edges(row, prefix=prefix) small_graph = BiGraph() for (a, b, oa, ob), is_uncertain in edges: color = "gray" if is_uncertain else "black" small_graph.add_edge(a, b, oa, ob, color=color) for (u, v), e in bg.edges.items(): # Grab edge if both vertices are on the same line if u in nodes and v in nodes: uv = (str(u), str(v)) if uv in small_graph.edges: e = small_graph.edges[uv] e.color = "blue" # supported by both evidences else: small_graph.add_edge(e) print(small_graph, file=sys.stderr) pngfile = "A{0:02d}.{1}".format(i + 1, opts.format) telomeres = (nns[0], nns[-1]) small_graph.draw( pngfile, namestart=opts.namestart, nodehighlight=telomeres, dpi=72 ) legend = [ "Edge colors:", "[BLUE] Experimental + Synteny", "[BLACK] Experimental certain", "[GRAY] Experimental uncertain", "[RED] Synteny only", "Rectangle nodes are telomeres.", ] print("\n".join(legend), file=sys.stderr) def merge(args): """ %prog merge graphs Merge multiple graphs together and visualize. """ p = OptionParser(merge.__doc__) p.add_option( "--colorlist", default="black,red,pink,blue,green", help="The color palette", ) opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) colorlist = opts.colorlist.split(",") assert len(colorlist) >= len(args), "Need more colors in --colorlist" g = BiGraph() for a, c in zip(args, colorlist): g.read(a, color=c) g.draw("merged.png") def happy(args): """ %prog happy happy.txt Make bi-directed graph from HAPPY mapping data. JCVI encodes uncertainties in the order of the contigs / scaffolds. : separates scaffolds + means telomere (though the telomere repeats may not show because the telomere-adjacent sequence is missing) - means that the scaffold is in reverse orientation to that shown in the 2003 TIGR scaffolds. Ambiguities are represented as follows, using Paul Dear.s description: [ ] means undetermined orientation. error quite possible (70% confidence?) ( ) means uncertain orientation. small chance of error (90% confidence?) { } means uncertain order. Example: +-8254707:8254647:-8254690:{[8254694]:[8254713]:[8254531]:[8254797]}:8254802:8254788+ """ p = OptionParser(happy.__doc__) p.add_option("--prefix", help="Add prefix to the name") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) (happyfile,) = args certain = "certain.graph" uncertain = "uncertain.graph" fw1 = open(certain, "w") fw2 = open(uncertain, "w") fp = open(happyfile) for row in fp: for e, is_uncertain in happy_edges(row, prefix=opts.prefix): fw = fw2 if is_uncertain else fw1 print(e, file=fw) logging.debug("Edges written to `{0}`".format(",".join((certain, uncertain)))) def fromblast(args): """ %prog fromblast blastfile subject.fasta Generate path from BLAST file. If multiple subjects map to the same query, an edge is constructed between them (with the link provided by the query). The BLAST file MUST be filtered, chained, supermapped. 
""" from jcvi.formats.blast import sort from jcvi.utils.range import range_distance p = OptionParser(fromblast.__doc__) p.add_option( "--clique", default=False, action="store_true", help="Populate clique instead of linear path", ) p.add_option( "--maxdist", default=100000, type="int", help="Create edge within certain distance", ) p.set_verbose(help="Print verbose reports to stdout") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) blastfile, subjectfasta = args clique = opts.clique maxdist = opts.maxdist sort([blastfile, "--query"]) blast = BlastSlow(blastfile, sorted=True) g = BiGraph() for query, blines in groupby(blast, key=lambda x: x.query): blines = list(blines) iterator = combinations(blines, 2) if clique else pairwise(blines) for a, b in iterator: asub, bsub = a.subject, b.subject if asub == bsub: continue arange = (a.query, a.qstart, a.qstop, "+") brange = (b.query, b.qstart, b.qstop, "+") dist, oo = range_distance(arange, brange, distmode="ee") if dist > maxdist: continue atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(asub, bsub, atag, btag) graph_to_agp(g, blastfile, subjectfasta, verbose=opts.verbose) def graph_to_agp(g, blastfile, subjectfasta, exclude=[], verbose=False): from jcvi.formats.agp import order_to_agp logging.debug(str(g)) g.write("graph.txt") # g.draw("graph.pdf") paths = [] for path in g.iter_paths(): m, oo = g.path(path) if len(oo) == 1: # Singleton path continue paths.append(oo) if verbose: print(m) print(oo) npaths = len(paths) ntigs = sum(len(x) for x in paths) logging.debug( "Graph decomposed to {0} paths with {1} components.".format(npaths, ntigs) ) agpfile = blastfile + ".agp" sizes = Sizes(subjectfasta) fwagp = open(agpfile, "w") scaffolded = set() for i, oo in enumerate(paths): ctgorder = [(str(ctg), ("+" if strand else "-")) for ctg, strand in oo] scaffolded |= set(ctg for ctg, strand in ctgorder) object = "pmol_{0:04d}".format(i) order_to_agp(object, ctgorder, sizes.mapping, fwagp) # Get the singletons as well nsingletons = nscaffolded = nexcluded = 0 for ctg, size in sizes.iter_sizes(): if ctg in scaffolded: nscaffolded += 1 continue if ctg in exclude: nexcluded += 1 continue ctgorder = [(ctg, "+")] object = ctg order_to_agp(object, ctgorder, sizes.mapping, fwagp) nsingletons += 1 logging.debug( "scaffolded={} excluded={} singletons={}".format( nscaffolded, nexcluded, nsingletons ) ) fwagp.close() logging.debug("AGP file written to `{0}`.".format(agpfile)) def connect(args): """ %prog connect assembly.fasta read_mapping.blast Connect contigs using long reads. 
""" p = OptionParser(connect.__doc__) p.add_option( "--clip", default=2000, type="int", help="Only consider end of contigs", ) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) fastafile, blastfile = args clip = opts.clip sizes = Sizes(fastafile).mapping blast = Blast(blastfile) blasts = [] for b in blast: seqid = b.subject size = sizes[seqid] start, end = b.sstart, b.sstop cstart, cend = min(size, clip), max(0, size - clip) if start > cstart and end < cend: continue blasts.append(b) key = lambda x: x.query blasts.sort(key=key) g = BiGraph() for query, bb in groupby(blasts, key=key): bb = sorted(bb, key=lambda x: x.qstart) nsubjects = len(set(x.subject for x in bb)) if nsubjects == 1: continue print("\n".join(str(x) for x in bb)) for a, b in pairwise(bb): astart, astop = a.qstart, a.qstop bstart, bstop = b.qstart, b.qstop if a.subject == b.subject: continue arange = astart, astop brange = bstart, bstop ov = range_intersect(arange, brange) alen = astop - astart + 1 blen = bstop - bstart + 1 if ov: ostart, ostop = ov ov = ostop - ostart + 1 print(ov, alen, blen) if ov and (ov > alen / 2 or ov > blen / 2): print("Too much overlap ({0})".format(ov)) continue asub = a.subject bsub = b.subject atag = ">" if a.orientation == "+" else "<" btag = ">" if b.orientation == "+" else "<" g.add_edge(asub, bsub, atag, btag) graph_to_agp(g, blastfile, fastafile, verbose=False) if __name__ == "__main__": main()
tanghaibao/jcvi
jcvi/assembly/syntenypath.py
Python
bsd-2-clause
16,153
[ "BLAST" ]
ed920e80dd4130a741bc94a6b53efc14fa2f3cec58802dc3db745291f3c0d5be
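A short sketch of the HAPPY-map parsing helpers above, applied to the example row quoted in the happy() docstring; it assumes the jcvi package is importable. Each yielded edge carries the two scaffold names, their orientations, and whether the link is uncertain.

from jcvi.assembly.syntenypath import happy_edges

row = "+-8254707:8254647:-8254690:{[8254694]:[8254713]:[8254531]:[8254797]}:8254802:8254788+"
for (a, b, oa, ob), is_uncertain in happy_edges(row):
    print(a, oa, ob, b, "uncertain" if is_uncertain else "certain")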
""" """ import os import sys import argparse from roblib import bcolors, stream_blast_results __author__ = 'Rob Edwards' def self_bit_scores(blastf, verbose=False): """ Generate a dict of self:self bitscores """ ss = {} for b in stream_blast_results(blastf, verbose): if b.query == b.db: if b.query in ss and ss[b.query] > b.bitscore: continue ss[b.query] = b.bitscore return ss def pairwise_bit_scores(blastf, ss, outf, verbose=False): """ Make a pairwise average bit score that is the bitscore / average of two proteins self/self bit score ;param blastf: the blastfile :param ss: the self-self bitscores :param outf: the output file to write :param verbose: more output :return a dict of all vs. all normalized bit scores """ if verbose: sys.stderr.write(f"{bcolors.GREEN}Creating scores{bcolors.ENDC}\n") pb = {} out = open(outf + ".tsv", 'w') out.write("Query\tSubject\tQLen\tSLen\tBits\tnBits\n") for b in stream_blast_results(blastf, verbose): if b.query not in pb: pb[b.query] = {} if b.db not in pb: pb[b.db] = {} # we normalize by the bitscore of the two proteins if we can! if b.query in ss and b.db in ss: nb = b.bitscore / ((ss[b.query] + ss[b.db])/2) else: # if we can't do that, we cheat and normalize # the bit score by twice # the average length of the proteins # i.e. the sum of the lengths nb = b.bitscore / (b.query_length + b.subject_length + 3.3) if b.query in pb[b.db] and pb[b.db][b.query] > nb: continue pb[b.db][b.query] = pb[b.db][b.query] = nb out.write(f"{b.query}\t{b.db}\t{b.query_length}\t{b.subject_length}\t{b.bitscore}\t{nb}\n") return pb def print_matrix(matf, pb, verbose=False): """ Print a matrix version of the pairwise bitscores :param matf: the matrix file to write :param pb: the pairwise bitscores :param verbose: more output :return: """ if verbose: sys.stderr.write(f"{bcolors.GREEN}Creating scores{bcolors.ENDC}\n") allkeys = list(pb.keys()) with open(matf + ".mat", 'w') as out: out.write("\t".join([""] + allkeys)) out.write("\n") for p in allkeys: out.write(p) for q in allkeys: if p == q: out.write("\t0") elif q in pb[p]: out.write(f"\t{1-pb[p][q]}") else: out.write("\t1") out.write("\n") if __name__ == "__main__": parser = argparse.ArgumentParser(description=' ') parser.add_argument('-b', help='blast input file', required=True) parser.add_argument('-o', help="output file base (we write both .tsv and .mat formats)", required=True) parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() ss = self_bit_scores(args.b, args.v) pb = pairwise_bit_scores(args.b, ss, args.o, args.v) print_matrix(args.o, pb, args.v)
linsalrob/EdwardsLab
phage_clustering/bit_score.py
Python
mit
3,193
[ "BLAST" ]
aa3baeafa02a826c31cd85d014e2549ecadd0ab9026d9fa89f3e43a4c2a159fc
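A programmatic equivalent of running the script above as "python bit_score.py -b all_vs_all.blast -o scores"; the input file name is a placeholder for a tab-separated all-vs-all BLAST result, and the author's roblib helpers must be installed.

from bit_score import self_bit_scores, pairwise_bit_scores, print_matrix

blast_file = "all_vs_all.blast"   # hypothetical all-vs-all BLAST tabular output
out_base = "scores"               # writes scores.tsv and scores.mat

ss = self_bit_scores(blast_file, verbose=True)
pb = pairwise_bit_scores(blast_file, ss, out_base, verbose=True)
print_matrix(out_base, pb, verbose=True)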
#!/usr/bin/env python ############################################################################## # # This PPXF_POPULATION_EXAMPLE routine shows how to study stellar population with # the procedure PPXF, which implements the Penalized Pixel-Fitting (pPXF) method by # Cappellari M., & Emsellem E., 2004, PASP, 116, 138. # # MODIFICATION HISTORY: # V1.0.0: Adapted from PPXF_KINEMATICS_EXAMPLE. # Michele Cappellari, Oxford, 12 October 2011 # V1.1.0: Made a separate routine for the construction of the templates # spectral library. MC, Vicenza, 11 October 2012 # V1.1.1: Includes regul_error definition. MC, Oxford, 15 November 2012 # V2.0.0: Translated from IDL into Python. MC, Oxford, 6 December 2013 # V2.0.1: Fit SDSS rather than SAURON spectrum. MC, Oxford, 11 December 2013 # V2.0.2: Support both Python 2.6/2.7 and Python 3.x. MC, Oxford, 25 May 2014 # ############################################################################## from __future__ import print_function import pyfits from scipy import ndimage import numpy as np import glob import matplotlib.pyplot as plt from time import clock from ppxf import ppxf import ppxf_util as util def setup_spectral_library(velscale, FWHM_gal): # Read the list of filenames from the Single Stellar Population library # by Vazdekis et al. (2010, MNRAS, 404, 1639) http://miles.iac.es/. # # For this example I downloaded from the above website a set of # model spectra with default linear sampling of 0.9A/pix and default # spectral resolution of FWHM=2.51A. I selected a Salpeter IMF # (slope 1.30) and a range of population parameters: # # [M/H] = [-1.71, -1.31, -0.71, -0.40, 0.00, 0.22] # Age = range(1.0, 17.7828, 26, /LOG) # # This leads to a set of 156 model spectra with the file names like # # Mun1.30Zm0.40T03.9811.fits # # IMPORTANT: the selected models form a rectangular grid in [M/H] # and Age: for each Age the spectra sample the same set of [M/H]. # # We assume below that the model spectra have been placed in the # directory "miles_models" under the current directory. # vazdekis = glob.glob('miles_models/Mun1.30*.fits') FWHM_tem = 2.51 # Vazdekis+10 spectra have a resolution FWHM of 2.51A. # Extract the wavelength range and logarithmically rebin one spectrum # to the same velocity scale of the SAURON galaxy spectrum, to determine # the size needed for the array which will contain the template spectra. # hdu = pyfits.open(vazdekis[0]) ssp = hdu[0].data h2 = hdu[0].header lamRange_temp = h2['CRVAL1'] + np.array([0.,h2['CDELT1']*(h2['NAXIS1']-1)]) sspNew, logLam2, velscale = util.log_rebin(lamRange_temp, ssp, velscale=velscale) # Create a three dimensional array to store the # two dimensional grid of model spectra # nAges = 26 nMetal = 6 templates = np.empty((sspNew.size,nAges,nMetal)) # Convolve the whole Vazdekis library of spectral templates # with the quadratic difference between the SAURON and the # Vazdekis instrumental resolution. Logarithmically rebin # and store each template as a column in the array TEMPLATES. # Quadratic sigma difference in pixels Vazdekis --> SAURON # The formula below is rigorously valid if the shapes of the # instrumental spectral profiles are well approximated by Gaussians. # FWHM_dif = np.sqrt(FWHM_gal**2 - FWHM_tem**2) sigma = FWHM_dif/2.355/h2['CDELT1'] # Sigma difference in pixels # Here we make sure the spectra are sorted in both [M/H] # and Age along the two axes of the rectangular grid of templates. 
# A simple alphabetical ordering of Vazdekis's naming convention # does not sort the files by [M/H], so we do it explicitly below # metal = ['m1.71', 'm1.31', 'm0.71', 'm0.40', 'p0.00', 'p0.22'] for k, mh in enumerate(metal): files = [s for s in vazdekis if mh in s] for j, filename in enumerate(files): hdu = pyfits.open(filename) ssp = hdu[0].data ssp = ndimage.gaussian_filter1d(ssp,sigma) sspNew, logLam2, velscale = util.log_rebin(lamRange_temp, ssp, velscale=velscale) templates[:,j,k] = sspNew # Templates are *not* normalized here print(np.shape(templates)) return templates, lamRange_temp #------------------------------------------------------------------------------ def ppxf_population_example_sdss(): # Read SDSS DR8 galaxy spectrum taken from here http://www.sdss3.org/dr8/ # The spectrum is *already* log rebinned by the SDSS DR8 # pipeline and log_rebin should not be used in this case. # file = 'spectra/NGC3522_SDSS.fits' hdu = pyfits.open(file) t = hdu[1].data z = float(hdu[1].header["Z"]) # SDSS redshift estimate # Only use the wavelength range in common between galaxy and stellar library. # mask = (t.field('wavelength') > 3540) & (t.field('wavelength') < 7409) galaxy = t[mask].field('flux')/np.median(t[mask].field('flux')) # Normalize spectrum to avoid numerical issues wave = t[mask].field('wavelength') # The noise level is chosen to give Chi^2/DOF=1 without regularization (REGUL=0) # noise = galaxy*0 + 0.01528 # Assume constant noise per pixel here # The velocity step was already chosen by the SDSS pipeline # and we convert it below to km/s # c = 299792.458 # speed of light in km/s velscale = np.log(wave[1]/wave[0])*c FWHM_gal = 2.76 # SDSS has an instrumental resolution FWHM of 2.76A. templates, lamRange_temp = setup_spectral_library(velscale, FWHM_gal) # The galaxy and the template spectra do not have the same starting wavelength. # For this reason an extra velocity shift DV has to be applied to the template # to fit the galaxy spectrum. We remove this artificial shift by using the # keyword VSYST in the call to PPXF below, so that all velocities are # measured with respect to DV. This assume the redshift is negligible. # In the case of a high-redshift galaxy one should de-redshift its # wavelength to the rest frame before using the line below as described # in PPXF_KINEMATICS_EXAMPLE_SAURON. # c = 299792.458 dv = (np.log(lamRange_temp[0])-np.log(wave[0]))*c # km/s vel = c*z # Initial estimate of the galaxy velocity in km/s goodpixels = util.determine_goodpixels(np.log(wave),lamRange_temp,vel) # Here the actual fit starts. The best fit is plotted on the screen. # # IMPORTANT: Ideally one would like not to use any polynomial in the fit # as the continuum shape contains important information on the population. # Unfortunately this is often not feasible, due to small calibration # uncertainties in the spectral shape. To avoid affecting the line strength of # the spectral features, we exclude additive polynomials (DEGREE=-1) and only use # multiplicative ones (MDEGREE=10). This is only recommended for population, not # for kinematic extraction, where additive polynomials are always recommended. # start = [vel, 180.] 
# (km/s), starting guess for [V,sigma] # See the pPXF documentation for the keyword REGUL, # for an explanation of the following two lines # templates /= np.median(templates) # Normalizes templates by a scalar regul_err = 0.004 # Desired regularization error t = clock() plt.clf() plt.subplot(211) pp = ppxf(templates, galaxy, noise, velscale, start, goodpixels=goodpixels, plot=True, moments=4, degree=-1, vsyst=dv, clean=False, mdegree=10, regul=1./regul_err) # When the two numbers below are the same, the solution is the smoothest # consistent with the observed spectrum. # print('Desired Delta Chi^2: %.4g' % np.sqrt(2*goodpixels.size)) print('Current Delta Chi^2: %.4g' % ((pp.chi2 - 1)*goodpixels.size)) print('Elapsed time in PPXF: %.2f s' % (clock() - t)) plt.subplot(212) s = templates.shape print(s) weights = pp.weights.reshape(s[1],s[2])/pp.weights.sum() plt.imshow(np.rot90(weights), interpolation='nearest', cmap='gist_heat', aspect='auto', extent=(np.log10(1.0), np.log10(17.7828), -1.9, 0.45)) plt.colorbar() plt.title("Mass Fraction") plt.xlabel("log$_{10}$ Age (Gyr)") plt.ylabel("[M/H]") plt.tight_layout() plt.show() vazdekis = glob.glob('miles_models/Mun1.30*.fits') ''' for i in range(len(vazdekis)): print(vazdekis[i].rstrip('.fits').split('/')[-1], weights.flatten()[i]) ''' #------------------------------------------------------------------------------ if __name__ == '__main__': ppxf_population_example_sdss()
zpace/SparsePak-SFH
ppxf_population_example_sdss.py
Python
mit
8,798
[ "Galaxy" ]
f365dee0e33042a7e5d4afda4afe3f7fe0e38d983870fd7157a071e0122ed77d
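A quick numeric check of the velocity-scale formula used above for the log-rebinned SDSS spectrum, velscale = c * ln(lambda2 / lambda1), assuming the standard SDSS sampling of 1e-4 dex per pixel; the starting wavelength is otherwise arbitrary.

import numpy as np

c = 299792.458                               # speed of light in km/s
lam1 = 3540.0                                # arbitrary starting wavelength in Angstrom
lam2 = lam1 * 10 ** 1e-4                     # next pixel, assuming 1e-4 dex sampling
velscale = np.log(lam2 / lam1) * c
print("velscale = %.2f km/s per pixel" % velscale)   # about 69 km/s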
#!/usr/bin/env python """ """ import vtk def main(): colors = vtk.vtkNamedColors() fileName = get_program_parameters() renderer1 = vtk.vtkRenderer() renderer1.SetViewport(0.0, 0.0, 0.5, 1.0) renderer2 = vtk.vtkRenderer() renderer2.SetViewport(0.5, 0.0, 1.0, 1.0) renderWindow = vtk.vtkRenderWindow() renderWindow.AddRenderer(renderer1) renderWindow.AddRenderer(renderer2) interactor = vtk.vtkRenderWindowInteractor() interactor.SetRenderWindow(renderWindow) # Create a cyberware source. # cyber = vtk.vtkPolyDataReader() cyber.SetFileName(fileName) deci = vtk.vtkDecimatePro() deci.SetInputConnection(cyber.GetOutputPort()) deci.SetTargetReduction(0.7) deci.PreserveTopologyOn() normals = vtk.vtkPolyDataNormals() normals.SetInputConnection(deci.GetOutputPort()) mask = vtk.vtkMaskPolyData() mask.SetInputConnection(deci.GetOutputPort()) mask.SetOnRatio(2) cyberMapper = vtk.vtkPolyDataMapper() cyberMapper.SetInputConnection(mask.GetOutputPort()) cyberActor = vtk.vtkActor() cyberActor.SetMapper(cyberMapper) cyberActor.GetProperty().SetColor(colors.GetColor3d("Flesh")) stripper = vtk.vtkStripper() stripper.SetInputConnection(cyber.GetOutputPort()) stripperMask = vtk.vtkMaskPolyData() stripperMask.SetInputConnection(stripper.GetOutputPort()) stripperMask.SetOnRatio(2) stripperMapper = vtk.vtkPolyDataMapper() stripperMapper.SetInputConnection(stripperMask.GetOutputPort()) stripperActor = vtk.vtkActor() stripperActor.SetMapper(stripperMapper) stripperActor.GetProperty().SetColor(colors.GetColor3d("Flesh")) # Add the actors to the renderer, set the background and size. # renderer1.AddActor(stripperActor) renderer2.AddActor(cyberActor) renderer1.SetBackground(colors.GetColor3d("Wheat")) renderer2.SetBackground(colors.GetColor3d("Papaya_Whip")) renderWindow.SetSize(1024, 640) # Render the image. # cam1 = vtk.vtkCamera() cam1.SetFocalPoint(0, 0, 0) cam1.SetPosition(1, 0, 0) cam1.SetViewUp(0, 1, 0) renderer1.SetActiveCamera(cam1) renderer2.SetActiveCamera(cam1) renderer1.ResetCamera() cam1.Azimuth(30) cam1.Elevation(30) cam1.Dolly(1.4) renderer1.ResetCameraClippingRange() interactor.Start() def get_program_parameters(): import argparse description = 'Triangle strip examples.' epilogue = ''' a) Structured triangle mesh consisting of 134 strips each of 390 triangles (stripF.tcl). b) Unstructured triangle mesh consisting of 2227 strips of average length 3.94, longest strip 101 triangles. Images are generated by displaying every other triangle strip (uStripeF.tcl). ''' parser = argparse.ArgumentParser(description=description, epilog=epilogue, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('filename1', help='fran_cut.vtk.') args = parser.parse_args() return args.filename1 if __name__ == '__main__': main()
lorensen/VTKExamples
src/Python/Rendering/StripFran.py
Python
apache-2.0
3,123
[ "VTK" ]
8e6bb924cc8982773a559e367f1eeb31833bfe0181da0701257ecc683789de53
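The same decimate-then-strip pipeline as the example above, but driven by a built-in vtkSphereSource so it can be tried without the fran_cut.vtk data file; the resolutions and reduction factor are arbitrary example values.

import vtk

sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(100)
sphere.SetPhiResolution(100)

deci = vtk.vtkDecimatePro()
deci.SetInputConnection(sphere.GetOutputPort())
deci.SetTargetReduction(0.7)
deci.PreserveTopologyOn()

stripper = vtk.vtkStripper()
stripper.SetInputConnection(deci.GetOutputPort())
stripper.Update()
print("triangle strips:", stripper.GetOutput().GetNumberOfStrips())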
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 from typing import Tuple, Union import numpy as np from ...core.acquisition import Acquisition from ...core.interfaces import IDifferentiable from ..methods import WarpedBayesianQuadratureModel class UncertaintySampling(Acquisition): """Uncertainty sampling acquisition function for (warped) Bayesian quadrature. The acquisition function has the form :math:`a(x) = \var{x}` for the Lebesgue measure, and :math:`a(x) = \var(x)p(x) ^ q` for a measure with density :math:`p(x)`. The default value for the power :math:`q` is 2, but it can be set to a different value. :math:`\var(x)` is the posterior variance of the approximate Gaussian process (GP) on the integrand. """ def __init__(self, model: Union[WarpedBayesianQuadratureModel, IDifferentiable], measure_power: float = 2): """ :param model: A warped Bayesian quadrature model that has gradients. :param measure_power: The power of the measure. Default is 2. Only used if the measure is not the Lebesgue measure. """ self.model = model self._measure_power = measure_power def has_gradients(self) -> bool: return True def _evaluate(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Evaluate the predictive variances and the acquisition function. :param x: Locations at which to evaluate the acquisition function, shape (num_points x num_dim). :return: Values of the acquisition function at x and unweighted variances. Both shape (num_points x 1). """ variances = self.model.predict(x)[1] if self.model.measure is None: return variances, variances else: weights = self.model.measure.compute_density(x).reshape(variances.shape) return variances * weights ** self._measure_power, variances def evaluate(self, x: np.ndarray) -> np.ndarray: """Evaluate the acquisition function. :param x: Locations at which to evaluate the acquisition function, shape (num_points x num_dim). :return: Values of the acquisition function at x, shape (num_points x 1). """ return self._evaluate(x)[0] def evaluate_with_gradients(self, x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Evaluate the acquisition function and compute its gradients. :param x: Locations at which to evaluate the acquisition function, shape (num_points x num_dim). :return: Values of the acquisition function at x, shape (num_points x 1), and corresponding gradients, shape (num_points, num_dim). """ p = self._measure_power variance_weighted, variance = self._evaluate(x) variance_gradient = self.model.get_prediction_gradients(x)[1] if self.model.measure is None: return variance, variance_gradient density = self.model.measure.compute_density(x) density_gradient = self.model.measure.compute_density_gradient(x) if p == 1: gradient_weighted = (density * variance_gradient.T).T + (variance[:, 0] * density_gradient.T).T return variance_weighted, gradient_weighted gradient_weighted = (density ** p * variance_gradient.T).T + ( p * (variance[:, 0] * density ** (p - 1)) * density_gradient.T ).T return variance_weighted, gradient_weighted
EmuKit/emukit
emukit/quadrature/acquisitions/uncertainty_sampling.py
Python
apache-2.0
3,512
[ "Gaussian" ]
488fbc5ba5eb93f696a03666ea3b37f49d790618e12183cfc3626970f952c229
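A standalone numpy rendering of the weighting performed in _evaluate() above: the acquisition value is the GP predictive variance multiplied by the measure density raised to the chosen power. The variance and density arrays are made-up numbers, not the output of a real emukit model.

import numpy as np

variances = np.array([[0.5], [0.2], [0.9]])   # stand-in for model.predict(x)[1]
density = np.array([[0.1], [0.4], [0.05]])    # stand-in for measure.compute_density(x)
power = 2                                     # default measure_power

acquisition = variances * density ** power    # Lebesgue case would just be the variances
print(acquisition.ravel())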
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """The Normal (Gaussian) distribution class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from tensorflow.contrib.distributions.python.ops import distribution from tensorflow.contrib.distributions.python.ops import kullback_leibler from tensorflow.contrib.distributions.python.ops import special_math from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import random_ops __all__ = [ "Normal", "NormalWithSoftplusScale", ] class Normal(distribution.Distribution): """The Normal distribution with location `loc` and `scale` parameters. #### Mathematical details The probability density function (pdf) is, ```none pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z Z = (2 pi sigma**2)**0.5 ``` where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z` is the normalization constant. The Normal distribution is a member of the [location-scale family]( https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be constructed as, ```none X ~ Normal(loc=0, scale=1) Y = loc + scale * X ``` #### Examples Examples of initialization of one or a batch of distributions. ```python # Define a single scalar Normal distribution. dist = tf.contrib.distributions.Normal(loc=0., scale=3.) # Evaluate the cdf at 1, returning a scalar. dist.cdf(1.) # Define a batch of two scalar valued Normals. # The first has mean 1 and standard deviation 11, the second 2 and 22. dist = tf.contrib.distributions.Normal(loc=[1, 2.], scale=[11, 22.]) # Evaluate the pdf of the first distribution on 0, and the second on 1.5, # returning a length two tensor. dist.prob([0, 1.5]) # Get 3 samples, returning a 3 x 2 tensor. dist.sample([3]) ``` Arguments are broadcast when possible. ```python # Define a batch of two scalar valued Normals. # Both have mean 1, but different standard deviations. dist = tf.contrib.distributions.Normal(loc=1., scale=[11, 22.]) # Evaluate the pdf of both distributions on the same point, 3.0, # returning a length 2 tensor. dist.prob(3.0) ``` """ def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name="Normal"): """Construct Normal distributions with mean and stddev `loc` and `scale`. The parameters `loc` and `scale` must be shaped in a way that supports broadcasting (e.g. `loc + scale` is a valid operation). Args: loc: Floating point tensor; the means of the distribution(s). 
scale: Floating point tensor; the stddevs of the distribution(s). Must contain only positive values. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. name: Python `str` name prefixed to Ops created by this class. Raises: TypeError: if `loc` and `scale` have different `dtype`. """ parameters = locals() with ops.name_scope(name, values=[loc, scale]) as ns: with ops.control_dependencies([check_ops.assert_positive(scale)] if validate_args else []): self._loc = array_ops.identity(loc, name="loc") self._scale = array_ops.identity(scale, name="scale") contrib_tensor_util.assert_same_float_dtype([self._loc, self._scale]) super(Normal, self).__init__( dtype=self._scale.dtype, reparameterization_type=distribution.FULLY_REPARAMETERIZED, validate_args=validate_args, allow_nan_stats=allow_nan_stats, parameters=parameters, graph_parents=[self._loc, self._scale], name=ns) @staticmethod def _param_shapes(sample_shape): return dict( zip(("loc", "scale"), ([ops.convert_to_tensor( sample_shape, dtype=dtypes.int32)] * 2))) @property def loc(self): """Distribution parameter for the mean.""" return self._loc @property def scale(self): """Distribution parameter for standard deviation.""" return self._scale def _batch_shape_tensor(self): return array_ops.broadcast_dynamic_shape( array_ops.shape(self.loc), array_ops.shape(self.scale)) def _batch_shape(self): return array_ops.broadcast_static_shape( self.loc.get_shape(), self.scale.get_shape()) def _event_shape_tensor(self): return constant_op.constant([], dtype=dtypes.int32) def _event_shape(self): return tensor_shape.scalar() def _sample_n(self, n, seed=None): shape = array_ops.concat([[n], self.batch_shape_tensor()], 0) sampled = random_ops.random_normal( shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed) return sampled * self.scale + self.loc def _log_prob(self, x): return self._log_unnormalized_prob(x) - self._log_normalization() def _prob(self, x): return math_ops.exp(self._log_prob(x)) def _log_cdf(self, x): return special_math.log_ndtr(self._z(x)) def _cdf(self, x): return special_math.ndtr(self._z(x)) def _log_survival_function(self, x): return special_math.log_ndtr(-self._z(x)) def _survival_function(self, x): return special_math.ndtr(-self._z(x)) def _log_unnormalized_prob(self, x): return -0.5 * math_ops.square(self._z(x)) def _log_normalization(self): return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale) def _entropy(self): # Use broadcasting rules to calculate the full broadcast scale. scale = self.scale * array_ops.ones_like(self.loc) return 0.5 * math.log(2. 
* math.pi * math.e) + math_ops.log(scale) def _mean(self): return self.loc * array_ops.ones_like(self.scale) def _stddev(self): return self.scale * array_ops.ones_like(self.loc) def _mode(self): return self._mean() def _z(self, x): """Standardize input `x` to a unit normal.""" with ops.name_scope("standardize", values=[x]): return (x - self.loc) / self.scale class NormalWithSoftplusScale(Normal): """Normal with softplus applied to `scale`.""" def __init__(self, loc, scale, validate_args=False, allow_nan_stats=True, name="NormalWithSoftplusScale"): parameters = locals() with ops.name_scope(name, values=[scale]) as ns: super(NormalWithSoftplusScale, self).__init__( loc=loc, scale=nn.softplus(scale, name="softplus_scale"), validate_args=validate_args, allow_nan_stats=allow_nan_stats, name=ns) self._parameters = parameters @kullback_leibler.RegisterKL(Normal, Normal) def _kl_normal_normal(n_a, n_b, name=None): """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal. Args: n_a: instance of a Normal distribution object. n_b: instance of a Normal distribution object. name: (optional) Name to use for created operations. default is "kl_normal_normal". Returns: Batchwise KL(n_a || n_b) """ with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]): one = constant_op.constant(1, dtype=n_a.dtype) two = constant_op.constant(2, dtype=n_a.dtype) half = constant_op.constant(0.5, dtype=n_a.dtype) s_a_squared = math_ops.square(n_a.scale) s_b_squared = math_ops.square(n_b.scale) ratio = s_a_squared / s_b_squared return (math_ops.square(n_a.loc - n_b.loc) / (two * s_b_squared) + half * (ratio - one - math_ops.log(ratio)))
HKUST-SING/tensorflow
tensorflow/contrib/distributions/python/ops/normal.py
Python
apache-2.0
9,119
[ "Gaussian" ]
d84665c7532633efba1a1998b87a6395a4c891edf5b64278921c34d9f60eaa90
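A quick numerical check of the closed form in _kl_normal_normal above: the sketch below restates the same algebra in plain NumPy and compares it against a Monte Carlo estimate of E_a[log p_a(X) - log p_b(X)]. This is an illustration of the formula from the source, not TensorFlow API usage; the parameter values and sample size are arbitrary choices.

import numpy as np

def kl_normal_normal(mu_a, sig_a, mu_b, sig_b):
    # Same algebra as _kl_normal_normal above:
    # (mu_a - mu_b)^2 / (2 sig_b^2) + 0.5 * (ratio - 1 - log(ratio))
    ratio = sig_a ** 2 / sig_b ** 2
    return ((mu_a - mu_b) ** 2 / (2.0 * sig_b ** 2)
            + 0.5 * (ratio - 1.0 - np.log(ratio)))

rng = np.random.RandomState(0)
mu_a, sig_a, mu_b, sig_b = 1.0, 2.0, -0.5, 1.5

# Monte Carlo estimate under samples from distribution a
x = rng.normal(mu_a, sig_a, size=1000000)
log_pa = -0.5 * ((x - mu_a) / sig_a) ** 2 - np.log(sig_a * np.sqrt(2 * np.pi))
log_pb = -0.5 * ((x - mu_b) / sig_b) ** 2 - np.log(sig_b * np.sqrt(2 * np.pi))

print(kl_normal_normal(mu_a, sig_a, mu_b, sig_b))  # analytic: ~0.601
print(np.mean(log_pa - log_pb))                    # Monte Carlo: ~0.601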
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RAffycompatible(RPackage):
    """This package provides an interface to Affymetrix chip annotation and
    sample attribute files. The package allows an easy way for users to
    download and manage local data bases of Affymetrix NetAffx annotation
    files. The package also provides access to GeneChip Operating System
    (GCOS) and GeneChip Command Console (AGCC)-compatible sample annotation
    files."""

    homepage = "https://www.bioconductor.org/packages/AffyCompatible/"
    url = "https://git.bioconductor.org/packages/AffyCompatible"

    version('1.36.0', git='https://git.bioconductor.org/packages/AffyCompatible',
            commit='dbbfd43a54ae1de6173336683a9461084ebf38c3')

    depends_on('r@3.4.0:3.4.9', when='@1.36.0')
    depends_on('r-xml', type=('build', 'run'))
    depends_on('r-rcurl', type=('build', 'run'))
    depends_on('r-biostrings', type=('build', 'run'))
skosukhin/spack
var/spack/repos/builtin/packages/r-affycompatible/package.py
Python
lgpl-2.1
2,149
[ "Bioconductor" ]
73d077e6692b1d850414f7b449b898eb025613807002c17470f85976c026a8a0
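The depends_on('r@3.4.0:3.4.9', when='@1.36.0') line above gates the R interpreter range to one specific package version. Below is a minimal plain-Python sketch of what the closed range syntax '3.4.0:3.4.9' means; it mimics the semantics for the case shown and is not Spack's actual implementation (real Spack ranges can also be open-ended).

def in_range(version, spec):
    # '3.4.0:3.4.9' -> inclusive lower and upper bounds
    lo, hi = spec.split(':')
    key = lambda v: tuple(int(x) for x in v.split('.'))
    return key(lo) <= key(version) <= key(hi)

print(in_range('3.4.3', '3.4.0:3.4.9'))  # True
print(in_range('3.5.0', '3.4.0:3.4.9'))  # False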
import subprocess import logging import csv import os class SequenceSearchResult: QUERY_FROM_FIELD = 'query_from' QUERY_TO_FIELD = 'query_to' QUERY_LENGTH_FIELD = 'query_length' HIT_FROM_FIELD = 'hit_from' HIT_TO_FIELD = 'hit_to' ALIGNMENT_LENGTH_FIELD = 'alignment_length' ALIGNMENT_BIT_SCORE = 'alignment_bit_score' ALIGNMENT_DIRECTION = 'alignment_direction' HIT_ID_FIELD = 'hit_id' QUERY_ID_FIELD = 'query_id' HMM_NAME_FIELD = 'hmm_name' ACCESSION_ID_FIELD = 'accession_id' PERCENT_ID_FIELD = 'percent_id' MISMATCH_FIELD = "mismatch" EVALUE_FIELD = "evalue" def __init__(self): self.fields = [] self.results = [] def each(self, field_names): """Iterate over the results, yielding a list for each result, where each element corresponds to the field given in the field_name parameters Parameters ---------- field_names: list of str The names of the fields to be returned during iteration Returns ------- None Exceptions ---------- raises something when a field name is not in self.fields """ field_ids = [] for f in field_names: # below raises error if the field name is not found, so # don't need to account for that. field_ids.append(self.fields.index(f)) for r in self.results: yield([r[i] for i in field_ids]) class DiamondSearchResult(SequenceSearchResult): @staticmethod def import_from_daa_file(daa_filename): '''Generate new results object from the output of diamond blastx/p''' # blast m8 format is # 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore res = DiamondSearchResult() res.fields = [ SequenceSearchResult.QUERY_ID_FIELD, SequenceSearchResult.HIT_ID_FIELD, SequenceSearchResult.PERCENT_ID_FIELD, SequenceSearchResult.ALIGNMENT_LENGTH_FIELD, SequenceSearchResult.MISMATCH_FIELD, #skip SequenceSearchResult.QUERY_FROM_FIELD, SequenceSearchResult.QUERY_TO_FIELD, SequenceSearchResult.HIT_FROM_FIELD, SequenceSearchResult.HIT_TO_FIELD, SequenceSearchResult.EVALUE_FIELD, SequenceSearchResult.ALIGNMENT_BIT_SCORE, # extras SequenceSearchResult.ALIGNMENT_DIRECTION, SequenceSearchResult.HMM_NAME_FIELD ] cmd = "diamond view -a '%s'" % daa_filename logging.debug("Running cmd: %s" % cmd) process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdout, stderr = process.communicate() reader = csv.reader(stdout.decode('ascii').splitlines(), delimiter='\t') if process.returncode != 0: raise Exception("Problem running diamond view with cmd: '%s'," "stderr was %s" % (cmd, stderr)) for row in reader: # 'qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore # 0 1 2 3 4 5 6 7 8 9 10 11 query_start = int(row[6]) query_end = int(row[7]) res.results.append([row[0], row[1], row[2], row[3], row[4], query_start, query_end, int(row[8]), int(row[9]), row[10], row[11], query_start < query_end, os.path.basename(daa_filename) ]) return res class HMMSearchResult(SequenceSearchResult): @staticmethod def import_from_nhmmer_table(hmmout_path): '''Generate new results object from the output of nhmmer search''' # nhmmer format is # qseqid queryname hmmfrom hmmto alifrom alito envfrom envto sqlen strand evalue bitscore bias description # 0 2 4 5 6 7 8 9 10 11 12 13 14 15 res=HMMSearchResult() res.fields = [ SequenceSearchResult.QUERY_ID_FIELD, SequenceSearchResult.HMM_NAME_FIELD, SequenceSearchResult.ALIGNMENT_LENGTH_FIELD, SequenceSearchResult.QUERY_FROM_FIELD, SequenceSearchResult.QUERY_TO_FIELD, SequenceSearchResult.HIT_FROM_FIELD, SequenceSearchResult.HIT_TO_FIELD, SequenceSearchResult.ALIGNMENT_BIT_SCORE, 
SequenceSearchResult.ALIGNMENT_DIRECTION, ] for row in [x.rstrip().split() for x in open(hmmout_path) if not x.startswith('#')]: alifrom = int(row[6]) alito = int(row[7]) aln_length = (alito-alifrom if alito-alifrom>0 else alifrom-alito) res.results.append([row[0], row[2], aln_length, int(row[4]), int(row[5]), alifrom, alito, row[13], alito > alifrom ]) return res @staticmethod def import_from_hmmsearch_table(hmmout_path): '''Generate new results object from the output of hmmsearch search''' # hmmsearch format is # qseqid tlen queryname qlen evalue bitscore bias hmmfrom hmmto alifrom alito envfrom envto acc # 0 2 3 5 6 7 8 15 16 17 18 19 20 21 res=HMMSearchResult() res.fields = [ SequenceSearchResult.QUERY_ID_FIELD, SequenceSearchResult.HMM_NAME_FIELD, SequenceSearchResult.ACCESSION_ID_FIELD, SequenceSearchResult.QUERY_LENGTH_FIELD, SequenceSearchResult.ALIGNMENT_LENGTH_FIELD, SequenceSearchResult.QUERY_FROM_FIELD, SequenceSearchResult.QUERY_TO_FIELD, SequenceSearchResult.HIT_FROM_FIELD, SequenceSearchResult.HIT_TO_FIELD, SequenceSearchResult.ALIGNMENT_BIT_SCORE, SequenceSearchResult.ALIGNMENT_DIRECTION, ] with open(hmmout_path) as f: for row in [x.rstrip().split() for x in f if not x.startswith('#')]: alifrom = int(row[17]) alito = int(row[18]) aln_length = (alito-alifrom if alito-alifrom>0 else alifrom-alito) if alito != alifrom: #this actually happens.. res.results.append([row[0], row[3], row[4], row[5], aln_length, int(row[15]), int(row[16]), alifrom, alito, row[7], True ]) return res
geronimp/graftM
graftm/sequence_search_results.py
Python
gpl-3.0
8,078
[ "BLAST" ]
b2873be5abef731c7971271c6e33bbeaee204d0a7cab33ef64c637443270da9d
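The each() iterator is the main consumption point for these result objects. A minimal sketch of its behaviour, filling a bare SequenceSearchResult by hand instead of parsing real diamond/nhmmer output; the read and gene names are fabricated, and it assumes graftm is importable (otherwise paste the class from above).

from graftm.sequence_search_results import SequenceSearchResult

res = SequenceSearchResult()
res.fields = [SequenceSearchResult.QUERY_ID_FIELD,
              SequenceSearchResult.HIT_ID_FIELD,
              SequenceSearchResult.PERCENT_ID_FIELD]
res.results = [['read_1', 'gene_A', '97.2'],
               ['read_2', 'gene_B', '88.5']]

# Select a subset of fields, returned in the order requested
for query, pid in res.each([SequenceSearchResult.QUERY_ID_FIELD,
                            SequenceSearchResult.PERCENT_ID_FIELD]):
    print(query, pid)
# read_1 97.2
# read_2 88.5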
# # Copyright (C) 2013-2019 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Set up a lattice-Boltzmann fluid and apply an external force density on it. """ import matplotlib.pyplot as plt import numpy as np import argparse parser = argparse.ArgumentParser(epilog=__doc__) group = parser.add_mutually_exclusive_group() group.add_argument('--cpu', action='store_true') group.add_argument('--gpu', action='store_true') args = parser.parse_args() print(""" ======================================================= = lattice-Boltzmann fluid example = ======================================================= """) required_features = ["EXTERNAL_FORCES"] if args.gpu: print("Using GPU implementation") required_features.append("CUDA") else: print("Using CPU implementation") if not args.cpu: print("(select the implementation with --cpu or --gpu)") import espressomd espressomd.assert_features(required_features) import espressomd.lb box_l = 50 system = espressomd.System(box_l=[box_l] * 3) system.set_random_state_PRNG() system.time_step = 0.01 system.cell_system.skin = 0.1 system.part.add(pos=[box_l / 2.0] * 3, fix=[1, 1, 1]) lb_params = {'agrid': 1, 'dens': 1, 'visc': 1, 'tau': 0.01, 'ext_force_density': [0, 0, -1.0 / (box_l**3)]} if args.gpu: lbf = espressomd.lb.LBFluidGPU(**lb_params) else: lbf = espressomd.lb.LBFluid(**lb_params) system.actors.add(lbf) system.thermostat.set_lb(LB_fluid=lbf, gamma=1.0) print(lbf.get_params()) f_list = np.zeros((10, 3)) for i in range(10): f_list[i] = system.part[0].f system.integrator.run(steps=10) print(i) fig1 = plt.figure() ax = fig1.add_subplot(111) ax.plot(f_list[:, 0], label=r"$F_x$") ax.plot(f_list[:, 1], label=r"$F_y$") ax.plot(f_list[:, 2], label=r"$F_z$") ax.legend() ax.set_xlabel("t") ax.set_ylabel("F") plt.show()
psci2195/espresso-ffans
samples/lbf.py
Python
gpl-3.0
2,503
[ "ESPResSo" ]
6454c8dba01d16d3671b3dfb85dcc0d756cf0de231f7fcfa43bb79e03352e5bd
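The sample above picks ext_force_density = [0, 0, -1.0 / box_l**3], so the force integrated over the whole periodic box has exactly unit magnitude; the drag on the single fixed particle is then expected to approach -1 in z once the fluid relaxes. That reading of the steady state is an interpretation of the setup, not something the script states; the bookkeeping itself needs no ESPResSo at all:

import numpy as np

box_l = 50
ext_force_density = np.array([0.0, 0.0, -1.0 / box_l**3])

# total external force on the fluid = force density * box volume
total_force = ext_force_density * box_l**3
print(total_force)  # [ 0.  0. -1.] -- the value f_list[:, 2] should approach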
""" This module implements all the functions to communicate with other Python modules (PIL, matplotlib, mayavi, etc.) """ import numpy as np def PIL_to_npimage(im): """Transforms a PIL/Pillow image into a numpy RGB(A) image. Actually all this do is returning numpy.array(im).""" return np.array(im) # w,h = im.size # d = (4 if im.mode=="RGBA" else 3) # return +np.frombuffer(im.tobytes(), dtype='uint8').reshape((h,w,d)) def mplfig_to_npimage(fig): """ Converts a matplotlib figure to a RGB frame after updating the canvas""" # only the Agg backend now supports the tostring_rgb function from matplotlib.backends.backend_agg import FigureCanvasAgg canvas = FigureCanvasAgg(fig) canvas.draw() # update/draw the elements # get the width and the height to resize the matrix l, b, w, h = canvas.figure.bbox.bounds w, h = int(w), int(h) # exports the canvas to a string buffer and then to a numpy nd.array buf = canvas.tostring_rgb() image = np.frombuffer(buf, dtype=np.uint8) return image.reshape(h, w, 3)
kerstin/moviepy
moviepy/video/io/bindings.py
Python
mit
1,085
[ "Mayavi" ]
27584bd594f17c8beb8fe1e655ea66d268fae8129a18e2aab98a1d017265e327
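A quick way to exercise mplfig_to_npimage: draw a small figure, convert it, and inspect the returned array. A sketch assuming moviepy is installed; the figure contents are arbitrary, and figsize * dpi fixes the expected pixel dimensions.

import matplotlib
matplotlib.use('Agg')  # headless backend, matching the converter's Agg canvas
import matplotlib.pyplot as plt
import numpy as np
from moviepy.video.io.bindings import mplfig_to_npimage

fig, ax = plt.subplots(figsize=(2, 2), dpi=50)  # 2 in * 50 dpi = 100 px per side
ax.plot(np.sin(np.linspace(0, 6.28, 100)))

frame = mplfig_to_npimage(fig)
print(frame.shape, frame.dtype)  # (100, 100, 3) uint8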
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2014 Alterra, Wageningen-UR
# Allard de Wit (allard.dewit@wur.nl), April 2014
from __future__ import print_function
import datetime
from copy import deepcopy

import numpy as np

from ..pydispatch import dispatcher
from ..traitlets import Float, Int, Instance, Enum, Unicode, Bool, AfgenTrait
from ..decorators import prepare_rates, prepare_states
from math import log10, sqrt, exp

from ..util import limit, Afgen, merge_dict
from ..base_classes import ParamTemplate, StatesTemplate, RatesTemplate, \
     SimulationObject
from .. import signals
from .. import exceptions as exc

def zeros(n):
    """Mimic np.zeros() by returning a list of zero floats of length n.
    """
    if isinstance(n, int):
        if n > 0:
            return [0.]*n
    msg = "zeros() should be called with positive integer, got: %s" % n
    raise ValueError(msg)

#-------------------------------------------------------------------------------
class WaterbalanceLayered(SimulationObject):
    """Waterbalance for freely draining soils under water-limited production.

    The purpose of the soil water balance calculations is to estimate the
    daily value of the soil moisture content. The soil moisture content
    influences soil moisture uptake and crop transpiration.

    The dynamic calculations are carried out in two sections, one for the
    calculation of rates of change per timestep (= 1 day) and one for the
    calculation of summation variables and state variables. The water balance
    is driven by rainfall, possibly buffered as surface storage, and
    evapotranspiration. The processes considered are infiltration, soil water
    retention, percolation (here conceived as downward water flow from rooted
    zone to second layer), and the loss of water beyond the maximum root zone.

    The textural profile of the soil is conceived as homogeneous. Initially
    the soil profile consists of two layers, the actually rooted soil and the
    soil immediately below the rooted zone until the maximum rooting depth
    (soil and crop dependent). The extension of the root zone from initial
    rooting depth to maximum rooting depth is described in Root_Dynamics
    class. From the moment that the maximum rooting depth is reached the soil
    profile is described as a one layer system.

    The class WaterbalanceLayered is derived from WATFDGW.F90 in WOFOSTx.x
    (release March 2012)
    """
    # INTERNALS
    RDold = Float(-99.)         # previous maximum rooting depth value
    RDMSLB = Float(-99.)        # max rooting depth soil layer boundary
    DSLR = Float(-99.)          # Counter for Days-Since-Last-Rain
    RINold = Float(-99)         # Infiltration rate of previous day
    XDEF = Float(1000.0)        # maximum depth of groundwater (in cm)
    PFFC = Float(2.0)           # PF field capacity, Float(log10(200.))
    PFWP = Float(log10(16000.)) # PF wilting point
    PFSAT = Float(-1.0)         # PF saturation
    EquilTableLEN = Int(30)     # GW: WaterFromHeight, HeightFromAir
    MaxFlowIter = Int(50)
    # Fraction of non-infiltrating rainfall as function of storm size
    NINFTB = Instance(Afgen)

    # ------------------------------------------
    # ILaR: code taken from classic waterbalance
    # ------------------------------------------
    flag_crop_emerged = Bool(False)
    flag_crop_finished = Bool(False)
    # ------------------------------------------

    class Parameters(ParamTemplate):
        GW = Int(-99)               # groundwater
        ZTI = Int(-99)              # initial depth of groundwater
        DD = Int(-99)               # drainage depth
        RDMSOL = Float(-99.)        # not used for waterbalance

        # soil layers
        NSL = Int(-99)              # from soildata, number
        SOIL_LAYERS = Instance(list) # from soildata, layers (TSL+SOIL_GROUP_NO+typ)

        # crop parameters
        IAIRDU = Float(-99.)
        RDMCR = Float(-99.)
RDI = Float(-99.) #RD = Float(-99.) # root_dynamics #RDM = Float(-99.) # root_dynamics # Site parameters IFUNRN = Float(-99.) #Int(-99)? SSMAX = Float(-99.) SSI = Float(-99.) WAV = Float(-99.) NOTINF = Float(-99.) SMLIM = Float(-99.) class StateVariables(StatesTemplate): SM = Float(-99.) SS = Float(-99.) W = Float(-99.) WI = Float(-99.) WLOW = Float(-99.) WBOT = Float(-99.) WLOWI = Float(-99.) WWLOW = Float(-99.) # W + WLOW WWLOWI = Float(-99.) WAVUPP = Float(-99.) WAVLOW = Float(-99.) RunOff = Float(-99.) #LOSS = Float(-99.) #RIN = Float(-99.) # IN? # GW, groundwater WZ = Float(-99.) WZI = Float(-99.) ZT = Float(-99.) WSUB0 = Float(-99.) WSUB = Float(-99.) # layer ILR = Int(-99) ILM = Int(-99) # Summation variables WTRAT = Float(-99.) EVST = Float(-99.) EVWT = Float(-99.) TSR = Float(-99.) RAINT = Float(-99.) WDRT = Float(-99.) TOTINF = Float(-99.) TOTIRR = Float(-99.) SUMSM = Float(-99.) PERCT = Float(-99.) LOSST = Float(-99.) CRT = Float(-99.) # GW DRAINT = Float(0.) # GW, always 0 # Checksums for rootzone (RT) and total system (TT) WBALRT = Float(-99.) WBALTT = Float(-99.) class RateVariables(RatesTemplate): EVS = Float(-99.) EVW = Float(-99.) RIN = Float(-99.) RIRR = Float(0.) # always 0 PERC = Float(-99.) LOSS = Float(-99.) DW = Float(-99.) DWLOW = Float(-99.) DWBOT = Float(-99.) # GW DWSUB = Float(-99.) # GW CR = Float(-99.) DMAX = Float(-99.) DZ = Float(-99.) RAIN = Float(-99.) # weather WTRA = Float(-99.) # evapotranspiration WTRAL = Instance(np.ndarray) # evapotranspiration class Layer(dict): # NOT USED as such ... in SOIL_LAYERS #soil_layers SOIL_GROUP_NO = Int(-99) TSL = Float(-99.) # thickness #soildata SMFCF = Float(-99.) SM0 = Float(-99.) SMW = Float(-99.) SOPE = Float(-99.) # soil surface conductivity KSUB = Float(-99.) K0 = Float(-99.) #CRAIRC = Float(-99.) # not used for waterbalance CONTAB = AfgenTrait() # Conductivity from PF SMTAB = AfgenTrait() # Soil Moisture from PF #derived from soildata PFTAB = AfgenTrait() # Soil Moisture from PF MFPTAB = AfgenTrait() # Matrix Flux Potential WaterFromHeight = AfgenTrait() # HeightFromAir = AfgenTrait() # LBSL = Float(-99.) # lower boundary Wtop = Float(-99.) # layer weight factors Wpot = Float(-99.) Wund = Float(-99.) WC = Float(-99.) WC0 = Float(-99.) WCW = Float(-99.) WCFC = Float(-99.) CondFC = Float(-99.) CondK0 = Float(-99.) SM = Float(-99.) DWC = Float(-99.) def initialize(self, day, kiosk, cropdata, soildata, sitedata, missing): # Merge dictionaries in order to pass them to the Parameters class. # use merge_dict iso deepcopy? 
parvalues = deepcopy(cropdata) parvalues.update(soildata) parvalues.update(sitedata) parvalues.update(missing) parvalues["RDMSOL"] = parvalues["SOIL_LAYERS"][-1]["LBSL"] # Assign parameter values self.params = self.Parameters(parvalues) p = self.params # ------ checks ------ RD = p.RDI #classic: RDM = max(p.RDI, min(p.RDMSOL, p.RDMCR)) RDM = max(p.RDI, p.RDMCR) #print "initialize WaterbalanceLayered NSL %i, GW %s, RD %f" % (p.NSL, p.GW, RD) if RD > RDM: msg = ("rooting depth %f exceeeds maximum rooting depth %f" % (RD, RDM)) raise exc.WaterBalanceError(msg) RDMFND = False for il in range (0, p.NSL): if abs(p.SOIL_LAYERS[il]['LBSL'] - RDM) < 0.01: RDMFND = True # layer boundary explicitly assigned to maximum rooting depth self.RDMSLB = p.SOIL_LAYERS[il]['LBSL'] # also guarantees that RDM is within the layered part of the soil if not RDMFND: msg = ("Maximum rooting depth (RDM) %f does not coincide " + "with a layer boundary in soil profile" % RDM) raise exc.WaterBalanceError(msg) # in case of groundwater the reference depth XDEF should be below the layered soil if p.GW: if self.XDEF <= p.SOIL_LAYERS[p.NSL-1]['LBSL']: msg = ("Reference depth XDEF (%f cm) must be below the " + "bottom of the soil layers" % self.XDEF) raise exc.WaterBalanceError(msg) # --- end of checks --- # find deepest layer with roots ILR = p.NSL-1 ILM = p.NSL-1 for il in range(p.NSL-1, -1, -1): if (p.SOIL_LAYERS[il]['LBSL'] >= RD ): ILR = il if (p.SOIL_LAYERS[il]['LBSL'] >= self.RDMSLB): ILM = il # calculate layer weight for RD-rooted layer and RDM-rooted layer self._layer_weights(RD, self.RDMSLB, ILR, ILM) # --- end of soil input section --- # save old rooting depth (for testing on growth in integration) self.RDold = RD SS = p.SSI # Initial surface storage # state variables set initially by self.StateVariables W = 0.0 WAVUPP = 0.0 WLOW = 0.0 WAVLOW = 0.0 WBOT = 0.0 if p.GW: # calculate initial soil moisture ZT = limit(0.1, self.XDEF, p.ZTI) # initial groundwater level if p.DD > 0.: # IDRAIN==1 ??? 
ZT = max(ZT, p.DD) # corrected for drainage depth #for the soil layers for il in range (0, p.NSL): HH = p.SOIL_LAYERS[il]['LBSL'] - p.SOIL_LAYERS[il]['TSL'] / 2.0 # depth at half-layer-height if p.SOIL_LAYERS[il]['LBSL'] - ZT < 0.0: # layer is above groundwater ; get equilibrium amount from Half-Height pressure head p.SOIL_LAYERS[il]['SM'] = p.SOIL_LAYERS[il]['SOILTYPE']['SMTAB'](log10(ZT-HH)) elif p.SOIL_LAYERS[il]['LBSL']-p.SOIL_LAYERS[il]['TSL'] >= ZT: # layer completely in groundwater p.SOIL_LAYERS[il]['SM'] = p.SOIL_LAYERS[il]['SOILTYPE']['SM0'] else: # layer partly in groundwater p.SOIL_LAYERS[il]['SM'] = (p.SOIL_LAYERS[il]['LBSL']-ZT)*p.SOIL_LAYERS[il]['SOILTYPE']['SM0'] \ + p.SOIL_LAYERS[il]['SOILTYPE']['WaterFromHeight'](ZT-(p.SOIL_LAYERS[il]['LBSL']-p.SOIL_LAYERS[il]['TSL']) \ ) / p.SOIL_LAYERS[il]['TSL'] # calculate (available) water in rooted and potentially rooted zone # note that amounts WBOT below RDM (RDMSLB) are not available (below potential rooting depth) for il in range (0, p.NSL): W += p.SOIL_LAYERS[il]['SM'] * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wtop'] WLOW += p.SOIL_LAYERS[il]['SM'] * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wpot'] WBOT += p.SOIL_LAYERS[il]['SM'] * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wund'] # available water WAVUPP += (p.SOIL_LAYERS[il]['SM']-p.SOIL_LAYERS[il]['SOILTYPE']['SMW']) \ * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wtop'] WAVLOW += (p.SOIL_LAYERS[il]['SM']-p.SOIL_LAYERS[il]['SOILTYPE']['SMW']) \ * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wpot'] # now various subsoil amonts WSUB0 = (self.XDEF-p.SOIL_LAYERS[p.NSL-1]['LBSL']) * p.SOIL_LAYERS[p.NSL-1]['SOILTYPE']['SM0'] # saturation if ZT > p.SOIL_LAYERS[p.NSL-1]['LBSL']: # groundwater below layered system WSUB = (self.XDEF-ZT)*p.SOIL_LAYERS[p.NSL-1]['SOILTYPE']['SM0'] \ + p.SOIL_LAYERS[p.NSL-1]['SOILTYPE']['WaterFromHeight'](ZT-p.SOIL_LAYERS[p.NSL-1]['LBSL']) else: # saturated subsoil WSUB = WSUB0 # then amount of moisture below rooted zone WZ = WLOW + WBOT + WSUB WZI = WZ else: # not GW # AVMAX - maximum available content of layer(s) # to get an even distribution of water in the rooted top if WAV is small. AVMAX = np.zeros(p.NSL) TOPRED = Instance(Float) LOWRED = Instance(Float) SML = Instance(Float) TOPLIM = 0.0 LOWLIM = 0.0 for il in range (0, ILM+1): # determine maximum content for this layer if il <= ILR: # in the rooted zone a separate limit applies SML = p.SMLIM SML = limit(p.SOIL_LAYERS[il]['SOILTYPE']['SMW'], p.SOIL_LAYERS[il]['SOILTYPE']['SM0'], SML) # Check whether SMLIM is within boundaries if p.IAIRDU == 1: # applicable only for flooded rice crops SML = p.SOIL_LAYERS[il]['SOILTYPE']['SM0'] # notify user of changes in SMLIM if SML != p.SMLIM: msg = "SMLIM not in valid range, changed from %f to %f." self.logger.warn(msg % (p.SMLIM, SML)) AVMAX[il] = (SML-p.SOIL_LAYERS[il]['SOILTYPE']['SMW']) * p.SOIL_LAYERS[il]['TSL'] # available in cm # also if partly rooted, the total layer capacity counts in TOPLIM # this means the water content of layer ILR is set as if it would be # completely rooted. This water will become available after a little # root growth and through numerical mixing each time step. TOPLIM = TOPLIM + AVMAX[il] else: # below the rooted zone the maximum is saturation (see code for WLOW in one-layer model) # again the full layer capacity adds to LOWLIM. 
SML = p.SOIL_LAYERS[il]['SOILTYPE']['SM0'] AVMAX[il] = (SML-p.SOIL_LAYERS[il]['SOILTYPE']['SMW']) * p.SOIL_LAYERS[il]['TSL'] # available in cm LOWLIM += AVMAX[il] if p.WAV <= 0.0: # no available water TOPRED = 0.0 LOWRED = 0.0 elif p.WAV <= TOPLIM: # available water fits in layer(s) 1..ILR, these layers are rooted or almost rooted # reduce amounts with ratio WAV / TOPLIM TOPRED = p.WAV / TOPLIM LOWRED = 0.0 elif p.WAV < TOPLIM+LOWLIM: # available water fits in potentially rooted layer # rooted zone is filled at capacity ; the rest reduced TOPRED = 1.0 LOWRED = (p.WAV-TOPLIM) / LOWLIM else: # water does not fit ; all layers "full" TOPRED = 1.0 LOWRED = 1.0 # within rootzone for il in range (0, ILR+1): # Part of the water assigned to ILR may not actually be in the rooted zone, but it will # be available shortly through root growth (and through numerical mixing). p.SOIL_LAYERS[il]['SM'] = p.SOIL_LAYERS[il]['SOILTYPE']['SMW'] + AVMAX[il] * TOPRED /p.SOIL_LAYERS[il]['TSL'] W += p.SOIL_LAYERS[il]['SM'] * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wtop'] WLOW += p.SOIL_LAYERS[il]['SM'] * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wpot'] # available water WAVUPP += (p.SOIL_LAYERS[il]['SM']-p.SOIL_LAYERS[il]['SOILTYPE']['SMW']) \ * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wtop'] WAVLOW += (p.SOIL_LAYERS[il]['SM']-p.SOIL_LAYERS[il]['SOILTYPE']['SMW']) \ * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wpot'] # between initial and maximum rooting depth. In case RDM is not a layer boundary (it should be!!) # layer ILM contains additional water in unrooted part. Only rooted part contributes to WAV. for il in range (ILR+1, ILM+1): p.SOIL_LAYERS[il]['SM'] = p.SOIL_LAYERS[il]['SOILTYPE']['SMW'] + \ AVMAX[il] * LOWRED / p.SOIL_LAYERS[il]['TSL'] WLOW += p.SOIL_LAYERS[il]['SM'] * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wpot'] # available water WAVLOW += (p.SOIL_LAYERS[il]['SM']-p.SOIL_LAYERS[il]['SOILTYPE']['SMW']) \ * p.SOIL_LAYERS[il]['TSL'] * p.SOIL_LAYERS[il]['Wpot'] # below the maximum rooting depth for il in range (ILM+1, p.NSL): p.SOIL_LAYERS[il]['SM'] = p.SOIL_LAYERS[il]['SOILTYPE']['SMW'] # set groundwater depth far away for clarity ; this prevents also # the root routine to stop root growth when they reach the groundwater ZT = 999.0 # init GW variables WZ = 0 WZI = WZ WSUB0 = 0 if p.GW: print("WATER LIMITED CROP PRODUCTION WITH GROUNDWATER") else: print("WATER LIMITED CROP PRODUCTION WITHOUT GROUNDWATER") print("=================================================") print(" fixed fraction RDMso=%3.0f NOTinf=%.3f" % (p.RDMSOL, p.NOTINF)) print(" SMLIM=%.3f RDM=%3.0i WAV=%3.0f SSmax=%3.0f" % \ (p.SMLIM, self.RDMSLB, p.WAV, p.SSMAX)) # water content for each layer + a few fixed points often used for il in range (0, p.NSL): p.SOIL_LAYERS[il]['WC'] = p.SOIL_LAYERS[il]['SM'] * p.SOIL_LAYERS[il]['TSL'] # state variable p.SOIL_LAYERS[il]['WC0'] = p.SOIL_LAYERS[il]['SOILTYPE']['SM0'] * p.SOIL_LAYERS[il]['TSL'] p.SOIL_LAYERS[il]['WCW'] = p.SOIL_LAYERS[il]['SOILTYPE']['SMW'] * p.SOIL_LAYERS[il]['TSL'] p.SOIL_LAYERS[il]['WCFC'] = p.SOIL_LAYERS[il]['SOILTYPE']['SMFCF'] * p.SOIL_LAYERS[il]['TSL'] p.SOIL_LAYERS[il]['CondFC'] = 10.0**p.SOIL_LAYERS[il]['SOILTYPE']['CONTAB'](self.PFFC) p.SOIL_LAYERS[il]['CondK0'] = 10.0**p.SOIL_LAYERS[il]['SOILTYPE']['CONTAB'](self.PFSAT) #print "layer %i: TSL %f WC %f SM %f SMfcf %f SMsat %f" % (il, \ # p.SOIL_LAYERS[il]['TSL'], p.SOIL_LAYERS[il]['WC'], p.SOIL_LAYERS[il]['SM'], \ # p.SOIL_LAYERS[il]['SOILTYPE']['SMFCF'], p.SOIL_LAYERS[il]['SOILTYPE']['SM0']) 
print("layer %i %3.1f cm: SM0=%.3f SMFC=%.3f SMW=%.3f" % (il, p.SOIL_LAYERS[il]['TSL'], \ p.SOIL_LAYERS[il]['SOILTYPE']['SM0'], p.SOIL_LAYERS[il]['SOILTYPE']['SMFCF'], \ p.SOIL_LAYERS[il]['SOILTYPE']['SMW'])) # rootzone and subsoil water WI = W WLOWI = WLOW WWLOW = W + WLOW SM = W / RD # p.SOIL_LAYERS[p.NSL-1]['LBSL'] # soil evaporation, days since last rain @ToDo: <= or <? self.DSLR = 1.0 if p.SOIL_LAYERS[0]['SM'] <= (p.SOIL_LAYERS[0]['SOILTYPE']['SMW'] + \ 0.5*(p.SOIL_LAYERS[0]['SOILTYPE']['SMFCF'] - \ p.SOIL_LAYERS[0]['SOILTYPE']['SMW'])): self.DSLR=5.0 self.RINold = 0. # RIN is used in calc_rates before it is set, so keep the RIN as RINold? self.NINFTB = Afgen([0.0,0.0, 0.5,0.0, 1.5,1.0, 0.0,0.0, 0.0,0.0, \ 0.0,0.0, 0.0,0.0, 0.0,0.0, 0.0,0.0, 0.0,0.0]) # INITIAL STATES self.states = self.StateVariables(kiosk, publish=["SM","W","WLOW","RAINT","WTRAT"], SM=SM, SS=SS, ZT=ZT, WZ=WZ, WZI=WZI, WSUB0=WSUB0, W=W, WI=WI, WLOW=WLOW, WLOWI=WLOWI, WWLOW=WWLOW, WWLOWI=WWLOW, WAVUPP=WAVUPP, WAVLOW=WAVLOW, ILR=ILR, ILM=ILM, WTRAT=0.0, EVST=0.0, EVWT=0.0, TSR=0.0, RAINT=0.0, WDRT=0.0, TOTINF=0.0, TOTIRR=0.0, SUMSM=0.0, PERCT=0.0, LOSST=0.0, RunOff=0.0, IN=0.0, CRT=0.0, DRAINT=0.0, WBOT=0.0, WSUB=0.0, WBALRT=-999.0, WBALTT=-999.0) self.rates = self.RateVariables(kiosk) self.kiosk = kiosk # ------------------------------------------ # ILaR: code taken from classic waterbalance # ------------------------------------------ # Connect to CROP_EMERGED/CROP_FINISH signals for water balance to # search for crop transpiration values self._connect_signal(self._on_CROP_EMERGED, signals.crop_emerged) self._connect_signal(self._on_CROP_FINISH, signals.crop_finish) # ------------------------------------------ #--------------------------------------------------------------------------- @prepare_rates def calc_rates(self, day, drv): # layered s = self.states p = self.params r = self.rates DELT = 1 RD = self._determine_rooting_depth() if RD != self.RDold: msg = "Rooting depth changed unexpectedly" raise RuntimeError(msg) #print "calc_rates WaterbalanceLayered NSL %i, GW %s, RD %f" % (p.NSL, p.GW, RD) # conductivities and Matric Flux Potentials for all layers PF = np.zeros(p.NSL) Conductivity = np.zeros(p.NSL) MatricFluxPot = np.zeros(p.NSL) EquilWater = np.zeros(p.NSL) for il in range (0, p.NSL): PF[il] = p.SOIL_LAYERS[il]['SOILTYPE']['PFTAB'](p.SOIL_LAYERS[il]['SM']) #print "layer %i: PF %f SM %f" % (il, PF[il], p.SOIL_LAYERS[il]['SM']) Conductivity[il] = 10.0**p.SOIL_LAYERS[il]['SOILTYPE']['CONTAB'](PF[il]) MatricFluxPot[il] = p.SOIL_LAYERS[il]['SOILTYPE']['MFPTAB'](PF[il]) if p.GW: # equilibrium amounts if p.SOIL_LAYERS[il]['LBSL'] < s.ZT: # groundwater below layer EquilWater[il] = p.SOIL_LAYERS[il]['SOILTYPE']['WaterFromHeight'](s.ZT-p.SOIL_LAYERS[il]['LBSL']+\ p.SOIL_LAYERS[il]['TSL']) \ - p.SOIL_LAYERS[il]['SOILTYPE']['WaterFromHeight'](s.ZT-p.SOIL_LAYERS[il]['LBSL']) elif p.SOIL_LAYERS[il]['LBSL']-p.SOIL_LAYERS[il]['TSL'] < s.ZT: # groundwater in layer EquilWater[il] = p.SOIL_LAYERS[il]['SOILTYPE']['WaterFromHeight'](s.ZT-p.SOIL_LAYERS[il]['LBSL']+\ p.SOIL_LAYERS[il]['TSL']) \ + (p.SOIL_LAYERS[il]['LBSL']-s.ZT) * p.SOIL_LAYERS[il]['SOILTYPE']['SM0'] else: # groundwater above layer EquilWater[il] = p.SOIL_LAYERS[il]['WC0'] # ------------------------------------------ # ILaR: code taken from classic waterbalance # ------------------------------------------ # Rainfall rate, not from kiosk??? 
r.RAIN = drv.RAIN # Transpiration and maximum soil and surfacewater evaporation rates # are calculated by the crop Evapotranspiration module. # However, if the crop is not yet emerged then set TRA=0 and use # the potential soil/water evaporation rates directly because there is # no shading by the canopy. if self.flag_crop_emerged is True: r.WTRA = self.kiosk["TRA"] r.WTRAL = self.kiosk["TRALY"] EVWMX = self.kiosk["EVWMX"] EVSMX = self.kiosk["EVSMX"] else: r.WTRA = 0. r.WTRAL = np.zeros(20) EVWMX = drv.E0 EVSMX = drv.ES0 # ------------------------------------------ # actual evaporation rates ... r.EVW = 0. r.EVS = 0. # ... from surface water if surface storage more than 1 cm, ... if s.SS > 1.: r.EVW = EVWMX else: # ... else from soil surface if self.RINold >= 1.: # RIN not set, must be RIN from previous 'call' r.EVS = EVSMX self.DSLR = 1. else: self.DSLR += 1. EVSMXT = EVSMX*(sqrt(self.DSLR) - sqrt(self.DSLR-1.)) r.EVS = min(EVSMX, EVSMXT+self.RINold) # preliminary infiltration rate if s.SS <= 0.1: # without surface storage if p.IFUNRN==0.: RINPRE = (1.-p.NOTINF)*r.RAIN + r.RIRR + s.SS/DELT if p.IFUNRN==1.: RINPRE = (1.-p.NOTINF*self.NINFTB(r.RAIN))*r.RAIN + r.RIRR + s.SS/DELT else: # with surface storage, infiltration limited by SOPE (topsoil) AVAIL = s.SS + (r.RAIN * (1.-p.NOTINF) + r.RIRR - r.EVW) * DELT RINPRE = min(p.SOIL_LAYERS[0]['SOILTYPE']['SOPE']*DELT, AVAIL) / DELT # maximum flow at Top Boundary of each layer # ------------------------------------------ # DOWNWARD flows are calculated in two ways, # (1) a "dry flow" from the matric flux potentials # (2) a "wet flow" under the current layer conductivities and downward gravity. # Clearly, only the dry flow may be negative (=upward). The dry flow accounts for the large # gradient in potential under dry conditions (but neglects gravity). The wet flow takes into # account gravity only and will dominate under wet conditions. The maximum of the dry and wet # flow is taken as the downward flow, which is then further limited in order the prevent # (a) oversaturation and (b) water content to decrease below field capacity. # # UPWARD flow is just the dry flow when it is negative. In this case the flow is limited # to a certain fraction of what is required to get the layers at equal potential, taking # into account, however, the contribution of an upward flow from further down. Hence, in # case of upward flow from the groundwater, this upward flow in propagated upward if the # suction gradient is sufficiently large. EVflow = np.zeros(p.NSL+1) # 1 more FlowMX = np.zeros(p.NSL+1) # 1 more Flow = np.zeros(p.NSL+1) # 1 more LIMWET = np.zeros(p.NSL) LIMDRY = np.zeros(p.NSL) for il in range (0, p.NSL): p.SOIL_LAYERS[il]['DWC'] = 0.0 # water change # first get flow through lower boundary of bottom layer if p.GW: # the old capillairy rise routine is used to estimate flow to/from the groundwater # note that this routine returns a positive value for capillairy rise and a negative # value for downward flow, which is the reverse from the convention in WATFDGW. 
if s.ZT >= p.SOIL_LAYERS[p.NSL-1]['LBSL']: # groundwater below the layered system ; call the old capillairty rise routine # the layer PF is allocated at 1/3 * TSL above the lower boundary ; this leeds # to a reasonable result for groundwater approaching the bottom layer SubFlow = self._SUBSOL(PF[p.NSL-1], \ s.ZT-p.SOIL_LAYERS[p.NSL-1]['LBSL']+p.SOIL_LAYERS[p.NSL-1]['TSL']/3.0, \ p.SOIL_LAYERS[p.NSL-1]['SOILTYPE']['CONTAB']) if SubFlow >= 0.0: # capillairy rise is limited by the amount required to reach equilibrium: # step 1. calculate equilibrium ZT for all air between ZT and top of layer EqAir = s.WSUB0 - s.WSUB + (p.SOIL_LAYERS[p.NSL-1]['WC0'] - p.SOIL_LAYERS[p.NSL-1]['WC']) # step 2. the grouindwater level belonging to this amount of air in equilibrium ZTeq1 = (p.SOIL_LAYERS[p.NSL-1]['LBSL'] - p.SOIL_LAYERS[p.NSL-1]['TSL']) \ + p.SOIL_LAYERS[il]['SOILTYPE']['HeightFromAir'](EqAir) # step 3. this level should normally lie below the current level # (otherwise there should not be capillairy rise) # in rare cases however, due to the use of a mid-layer height # in subroutine SUBSOL, a deviation could occur ZTeq2 = max(s.ZT, ZTeq1) # step 4. calculate for this ZTeq2 the equilibrium amount of water in the layer WCequil = p.SOIL_LAYERS[il]['SOILTYPE']['WaterFromHeight'](ZTeq2-p.SOIL_LAYERS[p.NSL-1]['LBSL']+\ p.SOIL_LAYERS[p.NSL-1]['TSL']) \ - p.SOIL_LAYERS[il]['SOILTYPE']['WaterFromHeight'](ZTeq2-p.SOIL_LAYERS[p.NSL-1]['LBSL']) # step5. use this equilibrium amount to limit the upward flow FlowMX[p.NSL] = -1.0 * min(SubFlow, max(WCequil-p.SOIL_LAYERS[p.NSL-1]['WC'], 0.0)/DELT) else: # downward flow ; air-filled pore space of subsoil limits downward flow AirSub = (s.ZT-p.SOIL_LAYERS[p.NSL-1]['LBSL'])*p.SOIL_LAYERS[p.NSL-1]['SOILTYPE']['SM0'] \ - p.SOIL_LAYERS[p.NSL-1]['SOILTYPE']['WaterFromHeight'](s.ZT-p.SOIL_LAYERS[p.NSL-1]['LBSL']) FlowMX[p.NSL] = min(abs(SubFlow), max(AirSub, 0.0)/DELT) else: # groundwater is in the layered system ; no further downward flow FlowMX[p.NSL] = 0.0 else: # not GW # Bottom layer conductivity limits the flow. 
Below field capacity there is no # downward flow, so downward flow through lower boundary can be guessed as FlowMX[p.NSL] = max(p.SOIL_LAYERS[p.NSL-1]['CondFC'], Conductivity[p.NSL-1]) # drainage r.DMAX = 0.0 for il in range (p.NSL-1, -1, -1): # if this layers contains maximum rootig depth and if rice, downward water loss is limited if p.IAIRDU==1 and il==s.ILM: FlowMX[il+1] = 0.05 * p.SOIL_LAYERS[il]['CondK0'] # limiting DOWNWARD flow rate # == wet conditions: the soil conductivity is larger # the soil conductivity is the flow rate for gravity only # this limit is DOWNWARD only # == dry conditions: the MFP gradient # the MFP gradient is larger for dry conditions # allows SOME upward flow if il==0: LIMWET[il] = p.SOIL_LAYERS[0]['SOILTYPE']['SOPE'] LIMDRY[il] = 0.0 else: # same soil type if p.SOIL_LAYERS[il-1]['SOILTYPE']['SOIL_GROUP_NO'] == p.SOIL_LAYERS[il]['SOILTYPE']['SOIL_GROUP_NO']: # flow rate estimate from gradient in Matric Flux Potential LIMDRY[il] = 2.0 * (MatricFluxPot[il-1]-MatricFluxPot[il])/(p.SOIL_LAYERS[il-1]['TSL'] + p.SOIL_LAYERS[il]['TSL']) if LIMDRY[il] < 0.0: # upward flow rate ; amount required for equal water content is required below MeanSM = (p.SOIL_LAYERS[il-1]['WC']+p.SOIL_LAYERS[il]['WC']) / (p.SOIL_LAYERS[il-1]['TSL'] + p.SOIL_LAYERS[il]['TSL']) EqualPotAmount = p.SOIL_LAYERS[il-1]['WC'] - p.SOIL_LAYERS[il-1]['TSL'] * MeanSM # should be negative like the flow else: # different soil types # iterative search to PF at layer boundary (by bisection) PF1 = PF[il-1] PF2 = PF[il] MFP1 = MatricFluxPot[il-1] MFP2 = MatricFluxPot[il] for i in range (0, self.MaxFlowIter): PFx = (PF1 + PF2) / 2.0 Flow1 = 2.0 * (+ MFP1 - p.SOIL_LAYERS[il-1]['MFPTAB'](PFx)) / p.SOIL_LAYERS[il-1]['TSL'] Flow2 = 2.0 * (- MFP2 + p.SOIL_LAYERS[ il ]['MFPTAB'](PFx)) / p.SOIL_LAYERS[il]['TSL'] if abs(Flow1-Flow2) < TinyFlow: # sufficient accuracy break elif abs(Flow1) > abs(Flow2): # flow in layer 1 is larger ; PFx must shift in the direction of PF1 PF2 = PFx elif abs(Flow1) < abs(Flow2): # flow in layer 2 is larger ; PFx must shift in the direction of PF2 PF1 = PFx if i >= self.MaxFlowIter: msg = "LIMDRY flow iteration failed" raise RuntimeError(msg) LIMDRY[il] = (Flow1 + Flow2) / 2.0 if LIMDRY[il] < 0.0: # upward flow rate ; amount required for equal potential is required below Eq1 = -p.SOIL_LAYERS[il]['WC'] Eq2 = 0.0 for i in range (0, self.MaxFlowIter): EqualPotAmount = (Eq1 + Eq2) / 2.0 SM1 = (p.SOIL_LAYERS[il-1]['WC'] - EqualPotAmount) / p.SOIL_LAYERS[il-1]['TSL'] SM2 = (p.SOIL_LAYERS[ il ]['WC'] + EqualPotAmount) / p.SOIL_LAYERS[il]['TSL'] PF1 = p.SOIL_LAYERS[il-1]['SMTAB'](SM1) PF2 = p.SOIL_LAYERS[ il ]['SMTAB'](SM2) if abs(Eq1-Eq2) < TinyFlow: # sufficient accuracy break elif PF1 > PF2: # suction in top layer larger; absolute amount should be larger Eq2 = EqualPotAmount else: # suction in bottom layer larger; absolute amount should be reduced Eq1 = EqualPotAmount if i >= self.MaxFlowIter: msg = "Limiting amount iteration failed" raise RuntimeError(msg) # the limit under wet conditions in a unit gradient LIMWET[il] = (p.SOIL_LAYERS[il-1]['TSL'] + p.SOIL_LAYERS[il]['TSL']) \ / (p.SOIL_LAYERS[il-1]['TSL']/Conductivity[il-1] + p.SOIL_LAYERS[il]['TSL']/Conductivity[il]) FlowDown = True if LIMDRY[il] < 0.0: # upward flow (negative !) 
is limited by fraction of amount required for equilibrium FlowMax = max(LIMDRY[il], EqualPotAmount * UpwardFlowLimit) if il > 0: # upward flow is limited by amount required to bring target layer at equilibrium/field capacity if p.GW: # soil does not drain below equilibrium with groundwater FCequil = max(p.SOIL_LAYERS[il-1]['WCFC'], EquilWater[il-1]) else: # free drainage FCequil = p.SOIL_LAYERS[il-1]['WCFC'] TargetLimit = r.WTRAL[il-1] + (FCequil-p.SOIL_LAYERS[il-1]['WC'])/DELT if TargetLimit > 0.0: # target layer is "dry": below field capacity ; limit upward flow FlowMax = max(FlowMax, -1.0*TargetLimit) # there is no saturation prevention since upward flow leads to a decrease ofp.SOIL_LAYERS[il]['WC'] # instead flow is limited in order to prevent a negative water content FlowMX[il] = max(FlowMax, FlowMX[il+1] + r.WTRAL[il] - p.SOIL_LAYERS[il]['WC']/DELT) FlowDown = False elif p.GW: # target layer is "wet": above field capacity, since gravity is # neglected in the matrix potential model, upward flow tends to be # overestyimated in wet conditions. With groundwater the profile # can get filled with water from above and upward flow is set to zero here. FlowMX[il] = 0.0 FlowDown = False else: # target layer is "wet": above field capacity, no groundwater # free drainage model implies that upward flow is rejected here # instead, downward flow is enabled. This guarantees that, if all # layers are above field capacity, the free drainage model applies. FlowDown = True if FlowDown: # maximum downward flow rate (LIMWET is always a positive number) FlowMax = max(LIMDRY[il], LIMWET[il]) # this prevents saturation of layer il # maximum top boundary flow is bottom boundary flow plus saturation deficit plus sink FlowMX[il] = min(FlowMax, FlowMX[il+1] + (p.SOIL_LAYERS[il]['WC0']-p.SOIL_LAYERS[il]['WC'])/DELT + r.WTRAL[il]) # adjustment of infiltration rate to prevent saturation r.RIN = min(RINPRE, FlowMX[0]) # contribution of layers to soil evaporation in case of drought upward flow is allowed EVSL = np.zeros(p.NSL) EVSL[0] = min(r.EVS, (p.SOIL_LAYERS[0]['WC'] - p.SOIL_LAYERS[0]['WCW'])/DELT + r.RIN - r.WTRAL[0]) EVrest = r.EVS - EVSL[0] for il in range (1, p.NSL): Available = max(0.0, (p.SOIL_LAYERS[il]['WC'] - p.SOIL_LAYERS[il]['WCW'])/DELT - r.WTRAL[il]) if Available >= EVrest: EVSL[il] = EVrest EVrest = 0.0 break else: EVSL[il] = Available EVrest = EVrest - Available # reduce evaporation if entire profile becomes airdry # there is no evaporative flow through lower boundary of layer NSL r.EVS -= EVrest #! 
evaporative flow (taken positive) at layer boundaries EVflow[0] = r.EVS for il in range (1, p.NSL): EVflow[il] = EVflow[il-1] - EVSL[il-1] EVflow[p.NSL] = 0.0 # limit downward flows not to get below field capacity / equilibrium content Flow[0] = r.RIN - EVflow[0] #print "Flow %i: %f = RIN %f - EVflow %f, sm: %f" % \ # (0, Flow[0], r.RIN, EVflow[0], p.SOIL_LAYERS[0]['SM']) for il in range (0, p.NSL): if p.GW: # soil does not drain below equilibrium with groundwater WaterLeft = max(p.SOIL_LAYERS[il]['WCFC'], EquilWater[il]) else: # free drainage WaterLeft = p.SOIL_LAYERS[il]['WCFC'] MXLOSS = (p.SOIL_LAYERS[il]['WC']-WaterLeft)/DELT # maximum loss #print " MXLOSS %i: %f = WC %f - WaterLeft %f, sm: %f" % \ # (il, MXLOSS, p.SOIL_LAYERS[il]['WC'], WaterLeft, p.SOIL_LAYERS[il]['SM']) Excess = max(0.0, MXLOSS + Flow[il] - r.WTRAL[il]) # excess of water (positive) #print " Excess %i: %f = max(0.0, MXLOSS %f + Flow %f - WTRAL %f)" % \ # (il, Excess, MXLOSS, Flow[il], r.WTRAL[il]) Flow[il+1] = min(FlowMX[il+1], Excess - EVflow[il+1]) # negative (upward) flow is not affected #print "Flow %i: %f = min(FlowMX %f, Excess %f - EVflow %f)" % # (il+1, Flow[il+1], FlowMX[il+1], Excess, EVflow[il+1]) # rate of change p.SOIL_LAYERS[il]['DWC'] = Flow[il] - Flow[il+1] - r.WTRAL[il] #print "layer %i: DWC %f=%f-%f-%f, sm: %f" % (il,\ # p.SOIL_LAYERS[il]['DWC'], Flow[il], Flow[il+1], r.WTRAL[il], \ # p.SOIL_LAYERS[il]['SM']) # Percolation and Loss. # Equations were derived from the requirement that in the same layer, above and below # depth RD (or RDM), the water content is uniform. Note that transpiration above depth # RD (or RDM) may require a negative percolation (or loss) for keeping the layer uniform. # This is in fact a numerical dispersion. After reaching RDM, this negative (LOSS) can be # prevented by choosing a layer boundary at RDM. 
        if s.ILR < s.ILM:
            # layer ILR is divided into rooted part (where the sink is) and a below-roots part
            # The flow in between is PERC
            f1 = p.SOIL_LAYERS[s.ILR]['Wtop'] # 1-f1 = Wpot
            r.PERC = (1.0-f1) * (Flow[s.ILR]-r.WTRAL[s.ILR]) + f1 * Flow[s.ILR+1]

            # layer ILM is divided as well ; the flow in between is LOSS
            f2 = p.SOIL_LAYERS[s.ILM]['Wpot']
            f3 = 1.0 - f2 # f3 = Wund
            r.LOSS = f3 * Flow[s.ILM] + f2 * Flow[s.ILM+1]
        elif s.ILR == s.ILM:
            # depths RD and RDM in the same soil layer: there are three "sublayers":
            # - the rooted sublayer with fraction f1
            # - between RD and RDM with fraction f2
            # - below RDM with fraction f3
            # PERC goes from 1->2, LOSS from 2->3
            # PERC and LOSS are calculated in such a way that the three sublayers have equal SM
            f1 = p.SOIL_LAYERS[s.ILR]['Wtop']
            r.PERC = (1.0-f1) * (Flow[s.ILR]-r.WTRAL[s.ILR]) + f1 * Flow[s.ILR+1]
            f2 = p.SOIL_LAYERS[s.ILM]['Wpot']
            f3 = 1.0 - f1 - f2
            r.LOSS = f3 * (Flow[s.ILM]-r.WTRAL[s.ILM]) + (1.0-f3) * Flow[s.ILM+1]
        else:
            msg = "Internal_1"
            raise RuntimeError(msg)

        # rates of change in amounts of moisture W and WLOWI
        r.DW = - r.WTRAL.sum() - r.EVS - r.PERC + r.RIN
        r.DWLOW = r.PERC - r.LOSS
        #print "DW %f= -WTRAL %f -EVS %f -PERC %f +RIN %f" % (r.DW, r.WTRAL.sum(), r.EVS, r.PERC, r.RIN)
        #print "DWLOW %f= PERC %f - LOSS %f" % (r.DWLOW, r.PERC, r.LOSS)

        if p.GW: # groundwater influence
            r.DWBOT = r.LOSS - Flow[p.NSL]
            r.DWSUB = Flow[p.NSL]
            #print "DWBOT %f= LOSS %f - Flow %f" % (r.DWBOT, r.LOSS, Flow[p.NSL])
            #print "DWSUB %f= Flow %f" % (r.DWSUB, Flow[p.NSL])

        #msg = '\n'.join(['%s = %s' % (k,v) for k,v in self.kiosk.iteritems()])
        #print(msg)

    #---------------------------------------------------------------------------
    @prepare_states
    def integrate(self, day, delt=1.0):
        s = self.states
        p = self.params
        r = self.rates
        #print "integrate WaterbalanceLayered NSL %i, GW %s" % (p.NSL, p.GW)

        DELT = 1  # could be dropped, and then DELT cleaned up everywhere

        #!-----------------------------------------------------------------------
        #! integrals of the water balance: summation and state variables
        #!-----------------------------------------------------------------------
        #!
amount of water in soil layers ; soil moisture content for il in range (0, p.NSL): p.SOIL_LAYERS[il]['WC'] += p.SOIL_LAYERS[il]['DWC'] * DELT p.SOIL_LAYERS[il]['SM'] = p.SOIL_LAYERS[il]['WC'] / p.SOIL_LAYERS[il]['TSL'] #print "layer %i: WC +%f=%f sm: %f" % (il, p.SOIL_LAYERS[il]['DWC'], p.SOIL_LAYERS[il]['WC'], p.SOIL_LAYERS[il]['SM']) # totals s.WTRAT += r.WTRAL.sum()*DELT # transpiration s.EVWT += r.EVW*DELT # evaporation from surface water layer and/or soil s.EVST += r.EVS*DELT s.RAINT += r.RAIN*DELT # rainfall, irrigation and infiltration s.TOTINF += r.RIN*DELT s.TOTIRR += r.RIRR*DELT SSPRE = s.SS + (r.RAIN+r.RIRR-r.EVW-r.RIN)*DELT s.SS = min(SSPRE, p.SSMAX) # surface storage s.TSR += (SSPRE-s.SS) # surface runoff # amounts of water s.W += r.DW*DELT # in rooted zone # classic test if negative: if W < 0.0: EVST += W ; w = 0.0 s.WAVUPP += r.DW*DELT s.WLOW += r.DWLOW*DELT # in unrooted, lower part of rootable zone s.WAVLOW += r.DWLOW*DELT s.WWLOW = s.W + s.WLOW # total in the whole rootable zone s.WBOT += r.DWBOT * DELT # and in layered soil below RDM # percolation from rootzone ; interpretation depends on mode if p.GW: # flow is either percolation or capillairy rise if r.PERC > 0.0: s.PERCT += r.PERC*DELT else: s.CRT -= r.PERC*DELT else: # flow is always called percolation s.PERCT += r.PERC*DELT s.CRT = 0.0 # loss of water by flow from the potential rootzone s.LOSST += r.LOSS*DELT #---------------------------------------------- # change of rootzone subsystem boundary #---------------------------------------------- # calculation of amount of soil moisture in new rootzone RD = self._determine_rooting_depth() # ??? if (RD-self.RDold) > 0.001: # roots have grown find new values ; overwrite W, WLOW, WAVUPP, WAVLOW, WBOT s.ILR = p.NSL-1 s.ILM = p.NSL-1 for il in range (p.NSL-1, -1, -1): if (p.SOIL_LAYERS[il]['LBSL'] >= RD ): s.ILR = il if (p.SOIL_LAYERS[il]['LBSL'] >= self.RDMSLB): s.ILM = il self._layer_weights(RD, self.RDMSLB, s.ILR, s.ILM) WOLD = s.W s.W = 0.0 s.WLOW = 0.0 s.WBOT = 0.0 s.WAVUPP = 0.0 s.WAVLOW = 0.0 # get W and WLOW and available water amounts for il in range (0, p.NSL): s.W += p.SOIL_LAYERS[il]['WC'] * p.SOIL_LAYERS[il]['Wtop'] s.WLOW += p.SOIL_LAYERS[il]['WC'] * p.SOIL_LAYERS[il]['Wpot'] s.WBOT += p.SOIL_LAYERS[il]['WC'] * p.SOIL_LAYERS[il]['Wund'] s.WAVUPP += (p.SOIL_LAYERS[il]['WC']-p.SOIL_LAYERS[il]['WCW']) * p.SOIL_LAYERS[il]['Wtop'] s.WAVLOW += (p.SOIL_LAYERS[il]['WC']-p.SOIL_LAYERS[il]['WCW']) * p.SOIL_LAYERS[il]['Wpot'] WDR = s.W - WOLD # water added to root zone by root growth, in cm s.WDRT += WDR # total water addition to rootzone by root growth self.RDold = RD # save RD for which layer contents have been determined s.SM = s.W/RD # mean soil moisture content in rooted zone s.SUMSM += s.SM*DELT # calculating mean soil moisture content over growing period #---------------------------------------------- # groundwater level #---------------------------------------------- if p.GW: # with groundwater influence s.WSUB += r.DWSUB * DELT # subsoil between soil layers and reference plane s.WZ = s.WLOW + s.WBOT + s.WSUB # amount of water below rooted zone # find groundwater level ZTfound = False for il in range (p.NSL-1, -1, -1): if il==p.NSL-1: AirSub = s.WSUB0 - s.WSUB if AirSub > 0.01: # groundwater is in subsoil which is not completely saturated s.ZT = min(p.SOIL_LAYERS[p.NSL-1]['LBSL'] + p.SOIL_LAYERS[il]['SOILTYPE']['HeightFromAir'](AirSub), self.XDEF) ZTfound = True break if p.SOIL_LAYERS[il]['SM'] < 0.999 * p.SOIL_LAYERS[il]['SOILTYPE']['SM0']: # groundwater is in 
this layer s.ZT = p.SOIL_LAYERS[il]['LBSL'] - p.SOIL_LAYERS[il]['TSL'] \ + min(p.SOIL_LAYERS[il]['TSL'], p.SOIL_LAYERS[il]['SOILTYPE']['HeightFromAir'](p.SOIL_LAYERS[il]['WC0']-p.SOIL_LAYERS[il]['WC'])) ZTfound = True break if not ZTfound: # entire system saturated s.ZT = 0.0 # quick-and-dirty: do once some printing on leaves died if self.kiosk.get("WLV",-9) == 0 and s.WBALRT == -999: self.finalize(day) #--------------------------------------------------------------------------- @prepare_states def finalize(self, day): s = self.states p = self.params # Checksums waterbalance for rootzone (WBALRT) and whole system (WBALTT) # --- # GW: WZI/WZ iso WLOWI/WLOW, + CRT, DRAINT iso LOSST, LOSST part of WZ? if p.GW: s.WBALRT = s.TOTINF + s.CRT + s.WI + s.WDRT \ - s.EVST - s.WTRAT - s.PERCT - s.W s.WBALTT = p.SSI + s.RAINT + s.TOTIRR + s.WI + s.WZI \ - s.W - s.WZ - s.WTRAT - s.EVWT - s.EVST - s.TSR - s.DRAINT - s.SS else: # mean water content rooting zone during crop growth and total # water content of the potentially rooted zone at end of simulation #MWC = SUMSM/max(1.,REAL (MOD((365+IDHALT-IDEM),365))) #TWE = W+WLOW s.WBALRT = s.TOTINF + s.WI + s.WDRT \ - s.EVST - s.WTRAT - s.PERCT - s.W s.WBALTT = p.SSI + s.RAINT + s.TOTIRR + s.WI - s.W + s.WLOWI - \ s.WLOW - s.WTRAT - s.EVWT - s.EVST - s.TSR - s.LOSST - s.SS print("\n WATER BALANCE WHOLE SYSTEM (1 DIMENS. COLUMN ; cm)") print(" init max root zone %5.1f final max root zone %5.1f change: %5.1f" % (s.WWLOWI, s.WWLOW, s.WWLOW-s.WWLOWI)) print(" init surf storage %5.1f final surf storage %5.1f change: %5.1f" % (p.SSI, s.SS, s.SS-p.SSI)) print(" irrigation %5.1f evap water surface %5.1f" % (s.TOTIRR, s.EVWT)) print(" rainfall %5.1f evap soil surface %5.1f" % (s.RAINT, s.EVST)) print(" transpiration %5.1f to atmos: %5.1f" % (s.WTRAT, s.EVWT+s.EVST+s.WTRAT)) print(" surface runoff %5.1f" % (s.TSR)) print(" lost to deep soil %5.1f" % (s.LOSST)) print(" TOTAL INIT + IN %5.1f TOTAL FINAL + OUT %5.1f checksum: %5.1f" % (s.WWLOWI+p.SSI+s.TOTIRR+s.RAINT, \ s.WWLOW+s.SS+s.EVWT+s.EVST+s.WTRAT+s.TSR+s.LOSST, \ s.WBALTT)) print("\n WATER BALANCE ROOT ZONE") print(" initial water stock %5.1f final water stock %5.1f" % (s.WI, s.W)) print(" infiltration %5.1f evap soil surface %5.1f" % (s.TOTINF, s.EVST)) print(" added by root growth %5.1f transpiration %5.1f" % (s.WDRT, s.WTRAT)) print(" percolation %5.1f" % (s.PERCT)) print(" TOTAL INIT + IN %5.1f FINAL + OUT %5.1f checksum: %5.1f" % (s.WI+s.TOTINF+s.WDRT, \ s.W+s.EVST+s.WTRAT+s.PERCT, \ s.WBALRT)) print("\n") if abs(s.WBALRT) > 0.0001 or abs(s.WBALTT) > 0.0001: msg = "Error in layered waterbalance!" 
raise RuntimeError(msg) # Run finalize on the subSimulationObjects SimulationObject.finalize(self, day) def _determine_rooting_depth(self): """Determines appropriate use of the rooting depth (RD) """ p = self.params if self.flag_crop_emerged is False and \ self.flag_crop_finished is False: # before crop starts hold RD at initial value RDI RD = p.RDI elif self.flag_crop_emerged is True and \ self.flag_crop_finished is False: # In cropping season RD = self.kiosk["RD"] elif self.flag_crop_emerged is True and \ self.flag_crop_finished is True: # Crop finished if "RD" in self.kiosk: # Only happens at the final simulation cycle when value for # SM still has to be computed RD = self.kiosk["RD"] else: RD = p.RDI else: msg = "Unable to determine rooting depth in Waterbalance Layered" raise RuntimeError(msg) return RD def _layer_weights(self, RD, RDM, ILR, ILM): """Calculate weight factors for rooted- and sub-layer calculations """ # RD the rooting depth # ILR deepest layer containing roots # ILM deepest layer that will contain roots # RDM max rooting depth aligned to soil layer boundary # NSL number of layers # TSL the layerthickness # LBSL the Lower Boundaries of the NSL soil layers # Wtop weights for contribution to rootzone # Wpot weights for contribution to potentially rooted zone # Wund weights for contribution to never rooted layers p = self.params s = self.states NSL = p.NSL sl = p.SOIL_LAYERS #print "---\nlayer_weights NSL: %i ILR: %i ILM: %i RD: %f RDM: %f\n---" % \ # (NSL, ILR, ILM, RD, RDM) for il in range (0, NSL): # rooted layer if (il < ILR): sl[il]['Wtop'] = 1.0 sl[il]['Wpot'] = 0.0 sl[il]['Wund'] = 0.0 # partly rooted elif (il == ILR and il < ILM): # at the end fully rooted sl[il]['Wtop'] = 1.0 - (sl[il]['LBSL']-RD) / sl[il]['TSL'] sl[il]['Wpot'] = 1.0 - sl[il]['Wtop'] sl[il]['Wund'] = 0.0 elif (il == ILR and il == ILM): # at the end partly rooted sl[il]['Wtop'] = 1.0 - (sl[il]['LBSL']-RD) / sl[il]['TSL'] sl[il]['Wund'] = (sl[il]['LBSL']-RDM) / sl[il]['TSL'] sl[il]['Wpot'] = 1.0 - sl[il]['Wund'] - sl[il]['Wtop'] # not rooted elif (il < ILM): # at the end fully rooted sl[il]['Wtop'] = 0.0 sl[il]['Wpot'] = 1.0 sl[il]['Wund'] = 0.0 elif (il == ILM): # at the end partly rooted sl[il]['Wtop'] = 0.0 sl[il]['Wund'] = (sl[il]['LBSL']-RDM) / sl[il]['TSL'] sl[il]['Wpot'] = 1.0 - sl[il]['Wund'] # never rooted else: sl[il]['Wtop'] = 0.0 sl[il]['Wpot'] = 0.0 sl[il]['Wund'] = 1.0 #msg = 'layer %i: %f %f weights top %f pot %f und %f' % \ # (il, sl[il]['TSL'], sl[il]['LBSL'], \ # sl[il]['Wtop'], sl[il]['Wpot'], sl[il]['Wund']) #print msg def _SUBSOL(self, PF, D, CONTAB): """SUBSOL... 
""" DEL = np.zeros(4) PFGAU = np.zeros(12) HULP = np.zeros(12) CONDUC = np.zeros(12) ELOG10 = 2.302585 LOGST4 = 2.518514 START = np.array([0, 45, 170, 330]) PFSTAN = np.array([0.705143, 1.352183, 1.601282, 1.771497, 2.031409, \ 2.192880, 2.274233, 2.397940, 2.494110]) PGAU = np.array([0.1127016654, 0.5, 0.8872983346]) WGAU = np.array([0.2777778, 0.4444444, 0.2777778]) # calculation of matric head and check on small pF PF1 = PF D1 = D MH = exp(ELOG10*PF1) if PF1 <= 0.: # in case of small matric head K0 = exp(ELOG10 * CONTAB(-1)) FLOW = K0 * (MH/D-1) else: IINT = 0 # number and width of integration intervals for I1 in range (0, 4): if I1 <= 2: DEL[I1] = min(START[I1+1],MH)-START[I1] elif I1 == 3: DEL[I1] = PF1-LOGST4 if DEL[I1] <= 0: break; IINT += 1 # preparation of three-point Gaussian integration for I1 in range (0, IINT): for I2 in range (0, 3): I3 = (3*I1) + I2 if I1 == IINT-1: # the three points in the last interval are calculated if IINT <= 3: PFGAU[I3] = log10 (START[IINT-1] + PGAU[I2] * DEL[IINT-1]) elif IINT == 4: PFGAU[I3] = LOGST4 + PGAU[I2] * DEL[IINT-1] else: PFGAU[I3] = PFSTAN[I3] # variables needed in the loop below CONDUC[I3] = exp(ELOG10 * CONTAB(PFGAU[I3])) HULP[I3] = DEL[I1] * WGAU[I2] * CONDUC[I3] if I3 > 8: HULP[I3] = HULP[I3] * ELOG10 * exp(ELOG10 * PFGAU[I3]); # 15.5 setting upper and lower limit FU = 1.27 FL = -1 * exp(ELOG10 * CONTAB(PF1)) if MH <= D1: FU = 0 if MH >= D1: FL = 0 if MH != D1: # Iteration loop IMAX = 3 * IINT; for I1 in range (0, 15): FLW = (FU+FL)/2 DF = (FU-FL)/2 if DF < 0.01 and DF/abs(FLW) < 0.1: break Z = 0 for I2 in range (0, IMAX): Z += HULP[I2]/(CONDUC[I2] + FLW) if Z >= D1: FL = FLW if Z <= D1: FU = FLW FLOW = (FU+FL)/2 return FLOW def _on_CROP_EMERGED(self): self.flag_crop_emerged = True def _on_CROP_FINISH(self): self.flag_crop_finished = True
jajberni/pcse_web
main/pcse/soil/waterbalance.py
Python
apache-2.0
60436
[ "Gaussian" ]
f0215f1db3f9fdf69591fe16f0297e2caab237f86dd224c7506fa56a2a8d6f13
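The _layer_weights method in the waterbalance record above splits every soil layer between the rooted zone (Wtop), the potentially rooted zone (Wpot) and the never-rooted zone (Wund), using the lower layer boundaries, the current rooting depth RD and the maximum rooting depth RDM. The following is a minimal standalone sketch of that weighting idea; the four-layer profile and the depths used in the demo are hypothetical example values, not parameters taken from the PCSE model.

def layer_weights(lower_bounds, rd, rdm):
    """Split each layer between rooted (Wtop), potentially rooted (Wpot)
    and never-rooted (Wund) fractions, mirroring the weighting logic of
    _layer_weights. lower_bounds are cumulative lower boundaries (cm),
    rd is the current rooting depth, rdm the maximum rooting depth (cm)."""
    weights = []
    upper = 0.0
    for lbsl in lower_bounds:
        tsl = lbsl - upper                    # layer thickness
        if lbsl <= rd:                        # fully rooted layer
            wtop, wpot, wund = 1.0, 0.0, 0.0
        elif upper < rd:                      # layer containing RD
            wtop = 1.0 - (lbsl - rd) / tsl
            if lbsl <= rdm:                   # will end up fully rooted
                wpot, wund = 1.0 - wtop, 0.0
            else:                             # also contains RDM
                wund = (lbsl - rdm) / tsl
                wpot = 1.0 - wtop - wund
        elif lbsl <= rdm:                     # not yet rooted, fully reachable
            wtop, wpot, wund = 0.0, 1.0, 0.0
        elif upper < rdm:                     # layer containing RDM
            wtop = 0.0
            wund = (lbsl - rdm) / tsl
            wpot = 1.0 - wund
        else:                                 # never rooted
            wtop, wpot, wund = 0.0, 0.0, 1.0
        weights.append((wtop, wpot, wund))
        upper = lbsl
    return weights


if __name__ == "__main__":
    # hypothetical 10/20/30/30 cm profile, RD = 25 cm, RDM = 60 cm
    for w in layer_weights([10.0, 30.0, 60.0, 90.0], rd=25.0, rdm=60.0):
        print("Wtop %.2f  Wpot %.2f  Wund %.2f" % w)

Each row of weights sums to 1, so every layer's water content is fully attributed to exactly one of the three zones, which is the invariant the original routine relies on.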
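The _SUBSOL helper in the same record evaluates its matric-head integral with three-point Gaussian quadrature: PGAU and WGAU are the Gauss-Legendre nodes and weights mapped onto the unit interval, which is presumably why the record carries the "Gaussian" keyword. Below is a short, self-contained sketch of that quadrature rule applied to an arbitrary integrand; the exponential test function is a made-up check, not the conductivity table (CONTAB) used by the model.

# Three-point Gauss-Legendre quadrature on [a, b], using the same nodes and
# weights (expressed on the unit interval) as PGAU/WGAU in _SUBSOL above.
PGAU = (0.1127016654, 0.5, 0.8872983346)   # nodes on [0, 1]
WGAU = (0.2777778, 0.4444444, 0.2777778)   # weights on [0, 1], summing to 1


def gauss3(f, a, b):
    """Approximate the integral of f over [a, b] with the 3-point rule."""
    width = b - a
    return width * sum(w * f(a + p * width) for p, w in zip(PGAU, WGAU))


if __name__ == "__main__":
    import math
    # exact value of the integral of exp(x) over [0, 1] is e - 1
    approx = gauss3(math.exp, 0.0, 1.0)
    print("approx %.8f  exact %.8f" % (approx, math.e - 1.0))

The rule is exact for polynomials up to degree five, so splitting the pF range into a few sub-intervals, as _SUBSOL does, gives an accurate integral of the strongly varying conductivity at very low cost.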
######################################################################## # File : InstallTools.py # Author : Ricardo Graciani ######################################################################## """ Collection of Tools for installation of DIRAC components: MySQL, DB's, Services's, Agents It only makes use of defaults in LocalInstallation Section in dirac.cfg The Following Options are used:: /DIRAC/Setup: Setup to be used for any operation /LocalInstallation/InstanceName: Name of the Instance for the current Setup (default /DIRAC/Setup) /LocalInstallation/LogLevel: LogLevel set in "run" script for all components installed /LocalInstallation/RootPath: Used instead of rootPath in "run" script if defined (if links are used to named versions) /LocalInstallation/InstancePath: Location where runit and startup directories are created (default rootPath) /LocalInstallation/UseVersionsDir: DIRAC is installed under versions/<Versioned Directory> with a link from pro (This option overwrites RootPath and InstancePath) /LocalInstallation/Host: Used when build the URL to be published for the installed service (default: socket.getfqdn()) /LocalInstallation/RunitDir: Location where runit directory is created (default InstancePath/runit) /LocalInstallation/StartupDir: Location where startup directory is created (default InstancePath/startup) /LocalInstallation/MySQLDir: Location where mysql databases are created (default InstancePath/mysql) /LocalInstallation/Database/User: (default Dirac) /LocalInstallation/Database/Password: (must be set for SystemAdministrator Service to work) /LocalInstallation/Database/RootPwd: (must be set for SystemAdministrator Service to work) /LocalInstallation/Database/Host: (must be set for SystemAdministrator Service to work) /LocalInstallation/Database/MySQLSmallMem: Configure a MySQL with small memory requirements for testing purposes innodb_buffer_pool_size=200MB /LocalInstallation/Database/MySQLLargeMem: Configure a MySQL with high memory requirements for production purposes innodb_buffer_pool_size=10000MB The setupSite method (used by the dirac-setup-site command) will use the following info:: /LocalInstallation/Systems: List of Systems to be defined for this instance in the CS (default: Configuration, Framework) /LocalInstallation/Databases: List of Databases to be installed and configured /LocalInstallation/Services: List of System/ServiceName to be setup /LocalInstallation/Agents: List of System/AgentName to be setup /LocalInstallation/WebPortal: Boolean to setup the Web Portal (default no) /LocalInstallation/ConfigurationMaster: Boolean, requires Configuration/Server to be given in the list of Services (default: no) /LocalInstallation/PrivateConfiguration: Boolean, requires Configuration/Server to be given in the list of Services (default: no) If a Master Configuration Server is being installed the following Options can be used:: /LocalInstallation/ConfigurationName: Name of the Configuration (default: Setup ) /LocalInstallation/AdminUserName: Name of the Admin user (default: None ) /LocalInstallation/AdminUserDN: DN of the Admin user certificate (default: None ) /LocalInstallation/AdminUserEmail: Email of the Admin user (default: None ) /LocalInstallation/AdminGroupName: Name of the Admin group (default: dirac_admin ) /LocalInstallation/HostDN: DN of the host certificate (default: None ) /LocalInstallation/VirtualOrganization: Name of the main Virtual Organization (default: None) """ __RCSID__ = "$Id$" # import os, re, glob, stat, time, shutil, socket gDefaultPerms 
= stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH import DIRAC from DIRAC import rootPath from DIRAC import gLogger from DIRAC import S_OK, S_ERROR from DIRAC.Core.Utilities.CFG import CFG from DIRAC.Core.Utilities.Version import getVersion from DIRAC.Core.Utilities.Subprocess import systemCall from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI from DIRAC.ConfigurationSystem.Client.Helpers import cfgPath, cfgPathToList, cfgInstallPath, \ cfgInstallSection, ResourcesDefaults, CSGlobals from DIRAC.Core.Security.Properties import ALARMS_MANAGEMENT, SERVICE_ADMINISTRATOR, \ CS_ADMINISTRATOR, JOB_ADMINISTRATOR, \ FULL_DELEGATION, PROXY_MANAGEMENT, OPERATOR, \ NORMAL_USER, TRUSTED_HOST from DIRAC.ConfigurationSystem.Client import PathFinder from DIRAC.Core.Base.private.ModuleLoader import ModuleLoader from DIRAC.Core.Base.AgentModule import AgentModule from DIRAC.Core.Base.ExecutorModule import ExecutorModule from DIRAC.Core.DISET.RequestHandler import RequestHandler from DIRAC.Core.Utilities.PrettyPrint import printTable from DIRAC.Core.Utilities.Platform import getPlatformString # On command line tools this can be set to True to abort after the first error. exitOnError = False # First some global defaults gLogger.debug( 'DIRAC Root Path =', rootPath ) def loadDiracCfg( verbose = False ): """ Read again defaults from dirac.cfg """ global localCfg, cfgFile, setup, instance, logLevel, linkedRootPath, host global basePath, instancePath, runitDir, startDir global db, mysqlDir, mysqlDbDir, mysqlLogDir, mysqlMyOrg, mysqlMyCnf, mysqlStartupScript global mysqlRootPwd, mysqlUser, mysqlPassword, mysqlHost, mysqlMode global mysqlSmallMem, mysqlLargeMem, mysqlPort, mysqlRootUser from DIRAC.Core.Utilities.Network import getFQDN localCfg = CFG() cfgFile = os.path.join( rootPath, 'etc', 'dirac.cfg' ) try: localCfg.loadFromFile( cfgFile ) except Exception: gLogger.always( "Can't load ", cfgFile ) gLogger.always( "Might be OK if setting up the site" ) setup = localCfg.getOption( cfgPath( 'DIRAC', 'Setup' ), '' ) instance = localCfg.getOption( cfgInstallPath( 'InstanceName' ), setup ) logLevel = localCfg.getOption( cfgInstallPath( 'LogLevel' ), 'INFO' ) linkedRootPath = localCfg.getOption( cfgInstallPath( 'RootPath' ), rootPath ) useVersionsDir = localCfg.getOption( cfgInstallPath( 'UseVersionsDir' ), False ) host = localCfg.getOption( cfgInstallPath( 'Host' ), getFQDN() ) basePath = os.path.dirname( rootPath ) instancePath = localCfg.getOption( cfgInstallPath( 'InstancePath' ), rootPath ) if useVersionsDir: # This option takes precedence instancePath = os.path.dirname( os.path.dirname( rootPath ) ) linkedRootPath = os.path.join( instancePath, 'pro' ) if verbose: gLogger.notice( 'Using Instance Base Dir at', instancePath ) runitDir = os.path.join( instancePath, 'runit' ) runitDir = localCfg.getOption( cfgInstallPath( 'RunitDir' ), runitDir ) if verbose: gLogger.notice( 'Using Runit Dir at', runitDir ) startDir = os.path.join( instancePath, 'startup' ) startDir = localCfg.getOption( cfgInstallPath( 'StartupDir' ), startDir ) if verbose: gLogger.notice( 'Using Startup Dir at', startDir ) # Now some MySQL default values db = {} mysqlDir = os.path.join( instancePath, 'mysql' ) mysqlDir = localCfg.getOption( cfgInstallPath( 'MySQLDir' ), mysqlDir ) if verbose: gLogger.notice( 'Using MySQL Dir at', mysqlDir ) mysqlDbDir = os.path.join( mysqlDir, 'db' ) mysqlLogDir = os.path.join( mysqlDir, 'log' ) mysqlMyOrg = os.path.join( rootPath, 'mysql', 'etc', 'my.cnf' 
) mysqlMyCnf = os.path.join( mysqlDir, '.my.cnf' ) mysqlStartupScript = os.path.join( rootPath, 'mysql', 'share', 'mysql', 'mysql.server' ) mysqlRootPwd = localCfg.getOption( cfgInstallPath( 'Database', 'RootPwd' ), mysqlRootPwd ) if verbose and mysqlRootPwd: gLogger.notice( 'Reading Root MySQL Password from local configuration' ) mysqlUser = localCfg.getOption( cfgInstallPath( 'Database', 'User' ), '' ) if mysqlUser: if verbose: gLogger.notice( 'Reading MySQL User from local configuration' ) else: mysqlUser = 'Dirac' mysqlPassword = localCfg.getOption( cfgInstallPath( 'Database', 'Password' ), mysqlPassword ) if verbose and mysqlPassword: gLogger.notice( 'Reading %s MySQL Password from local configuration ' % mysqlUser ) mysqlHost = localCfg.getOption( cfgInstallPath( 'Database', 'Host' ), '' ) if mysqlHost: if verbose: gLogger.notice( 'Using MySQL Host from local configuration', mysqlHost ) else: # if it is not defined use the same as for dirac services mysqlHost = host mysqlPort = localCfg.getOption( cfgInstallPath( 'Database', 'Port' ), 0 ) if mysqlPort: if verbose: gLogger.notice( 'Using MySQL Port from local configuration ', mysqlPort ) else: # if it is not defined use the same as for dirac services mysqlPort = 3306 mysqlRootUser = localCfg.getOption( cfgInstallPath( 'Database', 'RootUser' ), '' ) if mysqlRootUser: if verbose: gLogger.notice( 'Using MySQL root user from local configuration ', mysqlRootUser ) else: # if it is not defined use root mysqlRootUser = 'root' mysqlMode = localCfg.getOption( cfgInstallPath( 'Database', 'MySQLMode' ), '' ) if verbose and mysqlMode: gLogger.notice( 'Configuring MySQL server as %s' % mysqlMode ) mysqlSmallMem = localCfg.getOption( cfgInstallPath( 'Database', 'MySQLSmallMem' ), False ) if verbose and mysqlSmallMem: gLogger.notice( 'Configuring MySQL server for Low Memory uasge' ) mysqlLargeMem = localCfg.getOption( cfgInstallPath( 'Database', 'MySQLLargeMem' ), False ) if verbose and mysqlLargeMem: gLogger.notice( 'Configuring MySQL server for Large Memory uasge' ) # FIXME: we probably need a better way to do this mysqlRootPwd = '' mysqlPassword = '' mysqlMode = '' localCfg = None cfgFile = '' setup = '' instance = '' logLevel = '' linkedRootPath = '' host = '' basePath = '' instancePath = '' runitDir = '' startDir = '' db = {} mysqlDir = '' mysqlDbDir = '' mysqlLogDir = '' mysqlMyOrg = '' mysqlMyCnf = '' mysqlStartupScript = '' mysqlUser = '' mysqlHost = '' mysqlPort = '' mysqlRootUser = '' mysqlSmallMem = '' mysqlLargeMem = '' loadDiracCfg() def getInfo( extensions ): result = getVersion() if not result['OK']: return result rDict = result['Value'] if setup: rDict['Setup'] = setup else: rDict['Setup'] = 'Unknown' return S_OK( rDict ) def getExtensions(): """ Get the list of installed extensions """ initList = glob.glob( os.path.join( rootPath, '*DIRAC', '__init__.py' ) ) extensions = [ os.path.basename( os.path.dirname( k ) ) for k in initList] try: extensions.remove( 'DIRAC' ) except Exception: error = 'DIRAC is not properly installed' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) return S_OK( extensions ) def _addCfgToDiracCfg( cfg, verbose = False ): """ Merge cfg into existing dirac.cfg file """ global localCfg if str( localCfg ): newCfg = localCfg.mergeWith( cfg ) else: newCfg = cfg result = newCfg.writeToFile( cfgFile ) if not result: return result loadDiracCfg( verbose ) return result def _addCfgToCS( cfg ): """ Merge cfg into central CS """ cfgClient = CSAPI() result = cfgClient.downloadCSData() if 
not result['OK']: return result result = cfgClient.mergeFromCFG( cfg ) if not result['OK']: return result result = cfgClient.commit() return result def _addCfgToLocalCS( cfg ): """ Merge cfg into local CS """ csName = localCfg.getOption( cfgPath( 'DIRAC', 'Configuration', 'Name' ) , '' ) if not csName: error = 'Missing %s' % cfgPath( 'DIRAC', 'Configuration', 'Name' ) if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) csCfg = CFG() csFile = os.path.join( rootPath, 'etc', '%s.cfg' % csName ) if os.path.exists( csFile ): csCfg.loadFromFile( csFile ) if str( csCfg ): newCfg = csCfg.mergeWith( cfg ) else: newCfg = cfg return newCfg.writeToFile( csFile ) def _getCentralCfg( installCfg ): """ Create the skeleton of central Cfg for an initial Master CS """ # First copy over from installation cfg centralCfg = CFG() # DIRAC/Extensions extensions = localCfg.getOption( cfgInstallPath( 'Extensions' ), [] ) while 'Web' in list( extensions ): extensions.remove( 'Web' ) centralCfg.createNewSection( 'DIRAC', '' ) if extensions: centralCfg['DIRAC'].addKey( 'Extensions', ','.join( extensions ), '' ) vo = localCfg.getOption( cfgInstallPath( 'VirtualOrganization' ), '' ) if vo: centralCfg['DIRAC'].addKey( 'VirtualOrganization', vo, '' ) for section in [ 'Systems', 'Resources', 'Resources/Sites', 'Resources/Domains', 'Operations', 'Website', 'Registry' ]: if installCfg.isSection( section ): centralCfg.createNewSection( section, contents = installCfg[section] ) # Now try to add things from the Installation section # Registry adminUserName = localCfg.getOption( cfgInstallPath( 'AdminUserName' ), '' ) adminUserDN = localCfg.getOption( cfgInstallPath( 'AdminUserDN' ), '' ) adminUserEmail = localCfg.getOption( cfgInstallPath( 'AdminUserEmail' ), '' ) adminGroupName = localCfg.getOption( cfgInstallPath( 'AdminGroupName' ), 'dirac_admin' ) hostDN = localCfg.getOption( cfgInstallPath( 'HostDN' ), '' ) defaultGroupName = 'user' adminGroupProperties = [ ALARMS_MANAGEMENT, SERVICE_ADMINISTRATOR, CS_ADMINISTRATOR, JOB_ADMINISTRATOR, FULL_DELEGATION, PROXY_MANAGEMENT, OPERATOR ] defaultGroupProperties = [ NORMAL_USER ] defaultHostProperties = [ TRUSTED_HOST, CS_ADMINISTRATOR, JOB_ADMINISTRATOR, FULL_DELEGATION, PROXY_MANAGEMENT, OPERATOR ] for section in ( cfgPath( 'Registry' ), cfgPath( 'Registry', 'Users' ), cfgPath( 'Registry', 'Groups' ), cfgPath( 'Registry', 'Hosts' ) ): if not centralCfg.isSection( section ): centralCfg.createNewSection( section ) if adminUserName: if not ( adminUserDN and adminUserEmail ): gLogger.error( 'AdminUserName is given but DN or Mail is missing it will not be configured' ) else: for section in [ cfgPath( 'Registry', 'Users', adminUserName ), cfgPath( 'Registry', 'Groups', defaultGroupName ), cfgPath( 'Registry', 'Groups', adminGroupName ) ]: if not centralCfg.isSection( section ): centralCfg.createNewSection( section ) if centralCfg['Registry'].existsKey( 'DefaultGroup' ): centralCfg['Registry'].deleteKey( 'DefaultGroup' ) centralCfg['Registry'].addKey( 'DefaultGroup', defaultGroupName, '' ) if centralCfg['Registry']['Users'][adminUserName].existsKey( 'DN' ): centralCfg['Registry']['Users'][adminUserName].deleteKey( 'DN' ) centralCfg['Registry']['Users'][adminUserName].addKey( 'DN', adminUserDN, '' ) if centralCfg['Registry']['Users'][adminUserName].existsKey( 'Email' ): centralCfg['Registry']['Users'][adminUserName].deleteKey( 'Email' ) centralCfg['Registry']['Users'][adminUserName].addKey( 'Email' , adminUserEmail, '' ) # Add Admin User to Admin Group and 
default group for group in [adminGroupName, defaultGroupName]: if not centralCfg['Registry']['Groups'][group].isOption( 'Users' ): centralCfg['Registry']['Groups'][group].addKey( 'Users', '', '' ) users = centralCfg['Registry']['Groups'][group].getOption( 'Users', [] ) if adminUserName not in users: centralCfg['Registry']['Groups'][group].appendToOption( 'Users', ', %s' % adminUserName ) if not centralCfg['Registry']['Groups'][group].isOption( 'Properties' ): centralCfg['Registry']['Groups'][group].addKey( 'Properties', '', '' ) properties = centralCfg['Registry']['Groups'][adminGroupName].getOption( 'Properties', [] ) for prop in adminGroupProperties: if prop not in properties: properties.append( prop ) centralCfg['Registry']['Groups'][adminGroupName].appendToOption( 'Properties', ', %s' % prop ) properties = centralCfg['Registry']['Groups'][defaultGroupName].getOption( 'Properties', [] ) for prop in defaultGroupProperties: if prop not in properties: properties.append( prop ) centralCfg['Registry']['Groups'][defaultGroupName].appendToOption( 'Properties', ', %s' % prop ) # Add the master Host description if hostDN: hostSection = cfgPath( 'Registry', 'Hosts', host ) if not centralCfg.isSection( hostSection ): centralCfg.createNewSection( hostSection ) if centralCfg['Registry']['Hosts'][host].existsKey( 'DN' ): centralCfg['Registry']['Hosts'][host].deleteKey( 'DN' ) centralCfg['Registry']['Hosts'][host].addKey( 'DN', hostDN, '' ) if not centralCfg['Registry']['Hosts'][host].isOption( 'Properties' ): centralCfg['Registry']['Hosts'][host].addKey( 'Properties', '', '' ) properties = centralCfg['Registry']['Hosts'][host].getOption( 'Properties', [] ) for prop in defaultHostProperties: if prop not in properties: properties.append( prop ) centralCfg['Registry']['Hosts'][host].appendToOption( 'Properties', ', %s' % prop ) # Operations if adminUserEmail: operationsCfg = __getCfg( cfgPath( 'Operations', 'Defaults', 'EMail' ), 'Production', adminUserEmail ) centralCfg = centralCfg.mergeWith( operationsCfg ) operationsCfg = __getCfg( cfgPath( 'Operations', 'Defaults', 'EMail' ), 'Logging', adminUserEmail ) centralCfg = centralCfg.mergeWith( operationsCfg ) # Website websiteCfg = __getCfg( cfgPath( 'Website', 'Authorization', 'systems', 'configuration' ), 'Default', 'all' ) websiteCfg['Website'].addKey( 'DefaultGroups', ', '.join( ['visitor', defaultGroupName, adminGroupName] ), '' ) websiteCfg['Website'].addKey( 'DefaultSetup', setup, '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'showHistory' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'commitConfiguration' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'showCurrentDiff' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'showDiff' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'rollbackToVersion' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].addKey( 'manageRemoteConfig' , 'CSAdministrator' , '' ) websiteCfg['Website']['Authorization']['systems']['configuration'].appendToOption( 'manageRemoteConfig' , ', ServiceAdministrator' ) centralCfg = centralCfg.mergeWith( websiteCfg ) return centralCfg def __getCfg( section, option = '', value = '' ): """ Create a new Cfg with given info """ if not section: return None cfg = CFG() sectionList = [] for sect in 
cfgPathToList( section ): if not sect: continue sectionList.append( sect ) cfg.createNewSection( cfgPath( *sectionList ) ) if not sectionList: return None if option and value: sectionList.append( option ) cfg.setOption( cfgPath( *sectionList ), value ) return cfg def addOptionToDiracCfg( option, value ): """ Add Option to dirac.cfg """ optionList = cfgPathToList( option ) optionName = optionList[-1] section = cfgPath( *optionList[:-1] ) cfg = __getCfg( section, optionName, value ) if not cfg: return S_ERROR( 'Wrong option: %s = %s' % ( option, value ) ) if _addCfgToDiracCfg( cfg ): return S_OK() return S_ERROR( 'Could not merge %s=%s with local configuration' % ( option, value ) ) def addDefaultOptionsToCS( gConfig, componentType, systemName, component, extensions, mySetup = setup, specialOptions = {}, overwrite = False, addDefaultOptions = True ): """ Add the section with the component options to the CS """ system = systemName.replace( 'System', '' ) instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system ) if gConfig: compInstance = gConfig.getValue( instanceOption, '' ) else: compInstance = localCfg.getOption( instanceOption, '' ) if not compInstance: return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) ) sectionName = "Agents" if componentType == 'service': sectionName = "Services" elif componentType == 'executor': sectionName = "Executors" # Check if the component CS options exist addOptions = True componentSection = cfgPath( 'Systems', system, compInstance, sectionName, component ) if not overwrite: if gConfig: result = gConfig.getOptions( componentSection ) if result['OK']: addOptions = False if not addOptions: return S_OK( 'Component options already exist' ) # Add the component options now result = getComponentCfg( componentType, system, component, compInstance, extensions, specialOptions, addDefaultOptions ) if not result['OK']: return result compCfg = result['Value'] gLogger.notice( 'Adding to CS', '%s %s/%s' % ( componentType, system, component ) ) resultAddToCFG = _addCfgToCS( compCfg ) if componentType == 'executor': # Is it a container ? 
execList = compCfg.getOption( '%s/Load' % componentSection, [] ) for element in execList: result = addDefaultOptionsToCS( gConfig, componentType, systemName, element, extensions, setup, {}, overwrite ) resultAddToCFG.setdefault( 'Modules', {} ) resultAddToCFG['Modules'][element] = result['OK'] return resultAddToCFG def addDefaultOptionsToComponentCfg( componentType, systemName, component, extensions ): """ Add default component options local component cfg """ system = systemName.replace( 'System', '' ) instanceOption = cfgPath( 'DIRAC', 'Setups', setup, system ) compInstance = localCfg.getOption( instanceOption, '' ) if not compInstance: return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) ) # Add the component options now result = getComponentCfg( componentType, system, component, compInstance, extensions ) if not result['OK']: return result compCfg = result['Value'] compCfgFile = os.path.join( rootPath, 'etc', '%s_%s.cfg' % ( system, component ) ) return compCfg.writeToFile( compCfgFile ) def addCfgToComponentCfg( componentType, systemName, component, cfg ): """ Add some extra configuration to the local component cfg """ sectionName = 'Services' if componentType == 'agent': sectionName = 'Agents' if not cfg: return S_OK() system = systemName.replace( 'System', '' ) instanceOption = cfgPath( 'DIRAC', 'Setups', setup, system ) compInstance = localCfg.getOption( instanceOption, '' ) if not compInstance: return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) ) compCfgFile = os.path.join( rootPath, 'etc', '%s_%s.cfg' % ( system, component ) ) compCfg = CFG() if os.path.exists( compCfgFile ): compCfg.loadFromFile( compCfgFile ) sectionPath = cfgPath( 'Systems', system, compInstance, sectionName ) newCfg = __getCfg( sectionPath ) newCfg.createNewSection( cfgPath( sectionPath, component ), 'Added by InstallTools', cfg ) if newCfg.writeToFile( compCfgFile ): return S_OK( compCfgFile ) error = 'Can not write %s' % compCfgFile gLogger.error( error ) return S_ERROR( error ) def getComponentCfg( componentType, system, component, compInstance, extensions, specialOptions = {}, addDefaultOptions = True ): """ Get the CFG object of the component configuration """ sectionName = 'Services' if componentType == 'agent': sectionName = 'Agents' if componentType == 'executor': sectionName = 'Executors' componentModule = component if "Module" in specialOptions: componentModule = specialOptions['Module'] compCfg = CFG() if addDefaultOptions: extensionsDIRAC = [ x + 'DIRAC' for x in extensions ] + extensions for ext in extensionsDIRAC + ['DIRAC']: cfgTemplatePath = os.path.join( rootPath, ext, '%sSystem' % system, 'ConfigTemplate.cfg' ) if os.path.exists( cfgTemplatePath ): gLogger.notice( 'Loading configuration template', cfgTemplatePath ) # Look up the component in this template loadCfg = CFG() loadCfg.loadFromFile( cfgTemplatePath ) compCfg = loadCfg.mergeWith( compCfg ) compPath = cfgPath( sectionName, componentModule ) if not compCfg.isSection( compPath ): error = 'Can not find %s in template' % compPath gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) compCfg = compCfg[sectionName][componentModule] # Delete Dependencies section if any compCfg.deleteKey( 'Dependencies' ) sectionPath = cfgPath( 'Systems', system, compInstance, sectionName ) cfg = __getCfg( sectionPath ) cfg.createNewSection( cfgPath( sectionPath, component ), '', compCfg ) for option, value in specialOptions.items(): cfg.setOption( cfgPath( sectionPath, component, option ), value ) # 
Add the service URL if componentType == "service": port = compCfg.getOption( 'Port' , 0 ) if port and host: urlsPath = cfgPath( 'Systems', system, compInstance, 'URLs' ) cfg.createNewSection( urlsPath ) cfg.setOption( cfgPath( urlsPath, component ), 'dips://%s:%d/%s/%s' % ( host, port, system, component ) ) return S_OK( cfg ) def addDatabaseOptionsToCS( gConfig, systemName, dbName, mySetup = setup, overwrite = False ): """ Add the section with the database options to the CS """ system = systemName.replace( 'System', '' ) instanceOption = cfgPath( 'DIRAC', 'Setups', mySetup, system ) if gConfig: compInstance = gConfig.getValue( instanceOption, '' ) else: compInstance = localCfg.getOption( instanceOption, '' ) if not compInstance: return S_ERROR( '%s not defined in %s' % ( instanceOption, cfgFile ) ) # Check if the component CS options exist addOptions = True if not overwrite: databasePath = cfgPath( 'Systems', system, compInstance, 'Databases', dbName ) result = gConfig.getOptions( databasePath ) if result['OK']: addOptions = False if not addOptions: return S_OK( 'Database options already exist' ) # Add the component options now result = getDatabaseCfg( system, dbName, compInstance ) if not result['OK']: return result databaseCfg = result['Value'] gLogger.notice( 'Adding to CS', '%s/%s' % ( system, dbName ) ) return _addCfgToCS( databaseCfg ) def getDatabaseCfg( system, dbName, compInstance ): """ Get the CFG object of the database configuration """ databasePath = cfgPath( 'Systems', system, compInstance, 'Databases', dbName ) cfg = __getCfg( databasePath, 'DBName', dbName ) cfg.setOption( cfgPath( databasePath, 'Host' ), mysqlHost ) cfg.setOption( cfgPath( databasePath, 'Port' ), mysqlPort ) return S_OK( cfg ) def addSystemInstance( systemName, compInstance, mySetup = setup, myCfg = False ): """ Add a new system instance to dirac.cfg and CS """ system = systemName.replace( 'System', '' ) gLogger.notice( 'Adding %s system as %s instance for %s setup to dirac.cfg and CS' % ( system, compInstance, mySetup ) ) cfg = __getCfg( cfgPath( 'DIRAC', 'Setups', mySetup ), system, compInstance ) if myCfg: if not _addCfgToDiracCfg( cfg ): return S_ERROR( 'Failed to add system instance to dirac.cfg' ) return _addCfgToCS( cfg ) def printStartupStatus( rDict ): """ Print in nice format the return dictionary from getStartupComponentStatus (also returned by runsvctrlComponent) """ fields = ['Name','Runit','Uptime','PID'] records = [] try: for comp in rDict: records.append( [comp, rDict[comp]['RunitStatus'], rDict[comp]['Timeup'], str( rDict[comp]['PID'] ) ] ) printTable( fields, records ) except Exception, x: print "Exception while gathering data for printing: %s" % str( x ) return S_OK() def printOverallStatus( rDict ): """ Print in nice format the return dictionary from getOverallStatus """ fields = ['System','Name','Type','Setup','Installed','Runit','Uptime','PID'] records = [] try: for compType in rDict: for system in rDict[compType]: for component in rDict[compType][system]: record = [ system, component, compType.lower()[:-1] ] if rDict[compType][system][component]['Setup']: record.append( 'SetUp' ) else: record.append( 'NotSetUp' ) if rDict[compType][system][component]['Installed']: record.append( 'Installed' ) else: record.append( 'NotInstalled' ) record.append( str( rDict[compType][system][component]['RunitStatus'] ) ) record.append( str( rDict[compType][system][component]['Timeup'] ) ) record.append( str( rDict[compType][system][component]['PID'] ) ) records.append( record ) printTable( fields, 
records ) except Exception, x: print "Exception while gathering data for printing: %s" % str( x ) return S_OK() def getAvailableSystems( extensions ): """ Get the list of all systems (in all given extensions) locally available """ systems = [] for extension in extensions: extensionPath = os.path.join( DIRAC.rootPath, extension, '*System' ) for system in [ os.path.basename( k ).split( 'System' )[0] for k in glob.glob( extensionPath ) ]: if system not in systems: systems.append( system ) return systems def getSoftwareComponents( extensions ): """ Get the list of all the components ( services and agents ) for which the software is installed on the system """ # The Gateway does not need a handler services = { 'Framework' : ['Gateway'] } agents = {} executors = {} for extension in ['DIRAC'] + [ x + 'DIRAC' for x in extensions]: if not os.path.exists( os.path.join( rootPath, extension ) ): # Not all the extensions are necessarily installed in this instance continue systemList = os.listdir( os.path.join( rootPath, extension ) ) for sys in systemList: system = sys.replace( 'System', '' ) try: agentDir = os.path.join( rootPath, extension, sys, 'Agent' ) agentList = os.listdir( agentDir ) for agent in agentList: if agent[-3:] == ".py": agentFile = os.path.join( agentDir, agent ) afile = open( agentFile, 'r' ) body = afile.read() afile.close() if body.find( 'AgentModule' ) != -1 or body.find( 'OptimizerModule' ) != -1: if not agents.has_key( system ): agents[system] = [] agents[system].append( agent.replace( '.py', '' ) ) except OSError: pass try: serviceDir = os.path.join( rootPath, extension, sys, 'Service' ) serviceList = os.listdir( serviceDir ) for service in serviceList: if service.find( 'Handler' ) != -1 and service[-3:] == '.py': if not services.has_key( system ): services[system] = [] if system == 'Configuration' and service == 'ConfigurationHandler.py': service = 'ServerHandler.py' services[system].append( service.replace( '.py', '' ).replace( 'Handler', '' ) ) except OSError: pass try: executorDir = os.path.join( rootPath, extension, sys, 'Executor' ) executorList = os.listdir( executorDir ) for executor in executorList: if executor[-3:] == ".py": executorFile = os.path.join( executorDir, executor ) afile = open( executorFile, 'r' ) body = afile.read() afile.close() if body.find( 'OptimizerExecutor' ) != -1: if not executors.has_key( system ): executors[system] = [] executors[system].append( executor.replace( '.py', '' ) ) except OSError: pass resultDict = {} resultDict['Services'] = services resultDict['Agents'] = agents resultDict['Executors'] = executors return S_OK( resultDict ) def getInstalledComponents(): """ Get the list of all the components ( services and agents ) installed on the system in the runit directory """ services = {} agents = {} executors = {} systemList = os.listdir( runitDir ) for system in systemList: systemDir = os.path.join( runitDir, system ) components = os.listdir( systemDir ) for component in components: try: runFile = os.path.join( systemDir, component, 'run' ) rfile = open( runFile, 'r' ) body = rfile.read() rfile.close() if body.find( 'dirac-service' ) != -1: if not services.has_key( system ): services[system] = [] services[system].append( component ) elif body.find( 'dirac-agent' ) != -1: if not agents.has_key( system ): agents[system] = [] agents[system].append( component ) elif body.find( 'dirac-executor' ) != -1: if not executors.has_key( system ): executors[system] = [] executors[system].append( component ) except IOError: pass resultDict = {} 
resultDict['Services'] = services resultDict['Agents'] = agents resultDict['Executors'] = executors return S_OK( resultDict ) def getSetupComponents(): """ Get the list of all the components ( services and agents ) set up for running with runsvdir in startup directory """ services = {} agents = {} executors = {} if not os.path.isdir( startDir ): return S_ERROR( 'Startup Directory does not exit: %s' % startDir ) componentList = os.listdir( startDir ) for component in componentList: try: runFile = os.path.join( startDir, component, 'run' ) rfile = open( runFile, 'r' ) body = rfile.read() rfile.close() if body.find( 'dirac-service' ) != -1: system, service = component.split( '_' )[0:2] if not services.has_key( system ): services[system] = [] services[system].append( service ) elif body.find( 'dirac-agent' ) != -1: system, agent = component.split( '_' )[0:2] if not agents.has_key( system ): agents[system] = [] agents[system].append( agent ) elif body.find( 'dirac-executor' ) != -1: system, executor = component.split( '_' )[0:2] if not executors.has_key( system ): executors[system] = [] executors[system].append( executor ) except IOError: pass resultDict = {} resultDict['Services'] = services resultDict['Agents'] = agents resultDict['Executors'] = executors return S_OK( resultDict ) def getStartupComponentStatus( componentTupleList ): """ Get the list of all the components ( services and agents ) set up for running with runsvdir in startup directory """ try: if componentTupleList: cList = [] for componentTuple in componentTupleList: cList.extend( glob.glob( os.path.join( startDir, '_'.join( componentTuple ) ) ) ) else: cList = glob.glob( os.path.join( startDir, '*' ) ) except Exception: error = 'Failed to parse List of Components' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 0, ['runsvstat'] + cList ) if not result['OK']: return result output = result['Value'][1].strip().split( '\n' ) componentDict = {} for line in output: if not line: continue cname, routput = line.split( ':' ) cname = cname.replace( '%s/' % startDir, '' ) run = False reResult = re.search( '^ run', routput ) if reResult: run = True down = False reResult = re.search( '^ down', routput ) if reResult: down = True reResult = re.search( '([0-9]+) seconds', routput ) timeup = 0 if reResult: timeup = reResult.group( 1 ) reResult = re.search( 'pid ([0-9]+)', routput ) pid = 0 if reResult: pid = reResult.group( 1 ) runsv = "Not running" if run or down: runsv = "Running" reResult = re.search( 'runsv not running', routput ) if reResult: runsv = "Not running" runDict = {} runDict['Timeup'] = timeup runDict['PID'] = pid runDict['RunitStatus'] = "Unknown" if run: runDict['RunitStatus'] = "Run" if down: runDict['RunitStatus'] = "Down" if runsv == "Not running": runDict['RunitStatus'] = "NoRunitControl" componentDict[cname] = runDict return S_OK( componentDict ) def getComponentModule( gConfig, system, component, compType ): """ Get the component software module """ setup = CSGlobals.getSetup() instance = gConfig.getValue( cfgPath( 'DIRAC', 'Setups', setup, system ), '' ) if not instance: return S_OK( component ) module = gConfig.getValue( cfgPath( 'Systems', system, instance, compType, component, 'Module' ), '' ) if not module: module = component return S_OK( module ) def getOverallStatus( extensions ): """ Get the list of all the components ( services and agents ) set up for running with runsvdir in startup directory """ result = getSoftwareComponents( extensions ) if not 
result['OK']: return result softDict = result['Value'] result = getSetupComponents() if not result['OK']: return result setupDict = result['Value'] result = getInstalledComponents() if not result['OK']: return result installedDict = result['Value'] result = getStartupComponentStatus( [] ) if not result['OK']: return result runitDict = result['Value'] # Collect the info now resultDict = {'Services':{}, 'Agents':{}, 'Executors':{} } for compType in ['Services', 'Agents', 'Executors' ]: if softDict.has_key( 'Services' ): for system in softDict[compType]: resultDict[compType][system] = {} for component in softDict[compType][system]: if system == 'Configuration' and component == 'Configuration': # Fix to avoid missing CS due to different between Service name and Handler name component = 'Server' resultDict[compType][system][component] = {} resultDict[compType][system][component]['Setup'] = False resultDict[compType][system][component]['Installed'] = False resultDict[compType][system][component]['RunitStatus'] = 'Unknown' resultDict[compType][system][component]['Timeup'] = 0 resultDict[compType][system][component]['PID'] = 0 # TODO: why do we need a try here? try: if component in setupDict[compType][system]: resultDict[compType][system][component]['Setup'] = True except Exception: pass try: if component in installedDict[compType][system]: resultDict[compType][system][component]['Installed'] = True except Exception: pass try: compDir = system + '_' + component if runitDict.has_key( compDir ): resultDict[compType][system][component]['RunitStatus'] = runitDict[compDir]['RunitStatus'] resultDict[compType][system][component]['Timeup'] = runitDict[compDir]['Timeup'] resultDict[compType][system][component]['PID'] = runitDict[compDir]['PID'] except Exception: #print str(x) pass # Installed components can be not the same as in the software list if installedDict.has_key( 'Services' ): for system in installedDict[compType]: for component in installedDict[compType][system]: if compType in resultDict: if system in resultDict[compType]: if component in resultDict[compType][system]: continue resultDict[compType][system][component] = {} resultDict[compType][system][component]['Setup'] = False resultDict[compType][system][component]['Installed'] = True resultDict[compType][system][component]['RunitStatus'] = 'Unknown' resultDict[compType][system][component]['Timeup'] = 0 resultDict[compType][system][component]['PID'] = 0 # TODO: why do we need a try here? 
try: if component in setupDict[compType][system]: resultDict[compType][system][component]['Setup'] = True except Exception: pass try: compDir = system + '_' + component if runitDict.has_key( compDir ): resultDict[compType][system][component]['RunitStatus'] = runitDict[compDir]['RunitStatus'] resultDict[compType][system][component]['Timeup'] = runitDict[compDir]['Timeup'] resultDict[compType][system][component]['PID'] = runitDict[compDir]['PID'] except Exception: #print str(x) pass return S_OK( resultDict ) def checkComponentModule( componentType, system, module ): """ Check existence of the given module and if it inherits from the proper class """ if componentType == 'agent': loader = ModuleLoader( "Agent", PathFinder.getAgentSection, AgentModule ) elif componentType == 'service': loader = ModuleLoader( "Service", PathFinder.getServiceSection, RequestHandler, moduleSuffix = "Handler" ) elif componentType == 'executor': loader = ModuleLoader( "Executor", PathFinder.getExecutorSection, ExecutorModule ) else: return S_ERROR( 'Unknown component type %s' % componentType ) return loader.loadModule( "%s/%s" % ( system, module ) ) def checkComponentSoftware( componentType, system, component, extensions ): """ Check the component software """ result = getSoftwareComponents( extensions ) if not result['OK']: return result if componentType == 'service': softDict = result['Value']['Services'] elif componentType == 'agent': softDict = result['Value']['Agents'] else: return S_ERROR( 'Unknown component type %s' % componentType ) if system in softDict and component in softDict[system]: return S_OK() return S_ERROR( 'Unknown Component %s/%s' % ( system, component ) ) def runsvctrlComponent( system, component, mode ): """ Execute runsvctrl and check status of the specified component """ if not mode in ['u', 'd', 'o', 'p', 'c', 'h', 'a', 'i', 'q', '1', '2', 't', 'k', 'x', 'e']: return S_ERROR( 'Unknown runsvctrl mode "%s"' % mode ) startCompDirs = glob.glob( os.path.join( startDir, '%s_%s' % ( system, component ) ) ) # Make sure that the Configuration server restarts first and the SystemAdmin restarts last tmpList = list( startCompDirs ) for comp in tmpList: if "Framework_SystemAdministrator" in comp: startCompDirs.append( startCompDirs.pop( startCompDirs.index( comp ) ) ) if "Configuration_Server" in comp: startCompDirs.insert( 0, startCompDirs.pop( startCompDirs.index( comp ) ) ) startCompList = [ [k] for k in startCompDirs] for startComp in startCompList: result = execCommand( 0, ['runsvctrl', mode] + startComp ) if not result['OK']: return result time.sleep( 1 ) # Check the runsv status if system == '*' or component == '*': time.sleep( 5 ) # Final check result = getStartupComponentStatus( [( system, component )] ) if not result['OK']: return S_ERROR( 'Failed to start the component' ) return result def getLogTail( system, component, length = 100 ): """ Get the tail of the component log file """ retDict = {} for startCompDir in glob.glob( os.path.join( startDir, '%s_%s' % ( system, component ) ) ): compName = os.path.basename( startCompDir ) logFileName = os.path.join( startCompDir, 'log', 'current' ) if not os.path.exists( logFileName ): retDict[compName] = 'No log file found' else: logFile = open( logFileName, 'r' ) lines = [ line.strip() for line in logFile.readlines() ] logFile.close() if len( lines ) < length: retDict[compName] = '\n'.join( lines ) else: retDict[compName] = '\n'.join( lines[-length:] ) return S_OK( retDict ) def setupSite( scriptCfg, cfg = None ): """ Setup a new site using the 
options defined """ # First we need to find out what needs to be installed # by default use dirac.cfg, but if a cfg is given use it and # merge it into the dirac.cfg diracCfg = CFG() installCfg = None if cfg: try: installCfg = CFG() installCfg.loadFromFile( cfg ) for section in ['DIRAC', 'LocalSite', cfgInstallSection]: if installCfg.isSection( section ): diracCfg.createNewSection( section, contents = installCfg[section] ) if instancePath != basePath: if not diracCfg.isSection( 'LocalSite' ): diracCfg.createNewSection( 'LocalSite' ) diracCfg.setOption( cfgPath( 'LocalSite', 'InstancePath' ), instancePath ) _addCfgToDiracCfg( diracCfg, verbose = True ) except Exception: error = 'Failed to load %s' % cfg gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) # Now get the necessary info from localCfg setupSystems = localCfg.getOption( cfgInstallPath( 'Systems' ), ['Configuration', 'Framework'] ) installMySQLFlag = localCfg.getOption( cfgInstallPath( 'InstallMySQL' ), False ) setupDatabases = localCfg.getOption( cfgInstallPath( 'Databases' ), [] ) setupServices = [ k.split( '/' ) for k in localCfg.getOption( cfgInstallPath( 'Services' ), [] ) ] setupAgents = [ k.split( '/' ) for k in localCfg.getOption( cfgInstallPath( 'Agents' ), [] ) ] setupExecutors = [ k.split( '/' ) for k in localCfg.getOption( cfgInstallPath( 'Executors' ), [] ) ] setupWeb = localCfg.getOption( cfgInstallPath( 'WebPortal' ), False ) setupWebApp = localCfg.getOption( cfgInstallPath( 'WebApp' ), False ) setupConfigurationMaster = localCfg.getOption( cfgInstallPath( 'ConfigurationMaster' ), False ) setupPrivateConfiguration = localCfg.getOption( cfgInstallPath( 'PrivateConfiguration' ), False ) setupConfigurationName = localCfg.getOption( cfgInstallPath( 'ConfigurationName' ), setup ) setupAddConfiguration = localCfg.getOption( cfgInstallPath( 'AddConfiguration' ), True ) for serviceTuple in setupServices: error = '' if len( serviceTuple ) != 2: error = 'Wrong service specification: system/service' # elif serviceTuple[0] not in setupSystems: # error = 'System %s not available' % serviceTuple[0] if error: if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) serviceSysInstance = serviceTuple[0] if not serviceSysInstance in setupSystems: setupSystems.append( serviceSysInstance ) for agentTuple in setupAgents: error = '' if len( agentTuple ) != 2: error = 'Wrong agent specification: system/agent' # elif agentTuple[0] not in setupSystems: # error = 'System %s not available' % agentTuple[0] if error: if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) agentSysInstance = agentTuple[0] if not agentSysInstance in setupSystems: setupSystems.append( agentSysInstance ) for executorTuple in setupExecutors: error = '' if len( executorTuple ) != 2: error = 'Wrong executor specification: system/executor' if error: if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) executorSysInstance = executorTuple[0] if not executorSysInstance in setupSystems: setupSystems.append( executorSysInstance ) # And to find out the available extensions result = getExtensions() if not result['OK']: return result extensions = [ k.replace( 'DIRAC', '' ) for k in result['Value']] # Make sure the necessary directories are there if basePath != instancePath: if not os.path.exists( instancePath ): try: os.makedirs( instancePath ) except Exception: error = 'Can not create directory for instance %s' % instancePath if exitOnError: gLogger.exception( error ) 
DIRAC.exit( -1 ) return S_ERROR( error ) if not os.path.isdir( instancePath ): error = 'Instance directory %s is not valid' % instancePath if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) instanceEtcDir = os.path.join( instancePath, 'etc' ) etcDir = os.path.dirname( cfgFile ) if not os.path.exists( instanceEtcDir ): try: os.symlink( etcDir, instanceEtcDir ) except Exception: error = 'Can not create link to configuration %s' % instanceEtcDir if exitOnError: gLogger.exception( error ) DIRAC.exit( -1 ) return S_ERROR( error ) if os.path.realpath( instanceEtcDir ) != os.path.realpath( etcDir ): error = 'Instance etc (%s) is not the same as DIRAC etc (%s)' % ( instanceEtcDir, etcDir ) if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # if any server or agent needs to be install we need the startup directory and runsvdir running if setupServices or setupAgents or setupExecutors or setupWeb: if not os.path.exists( startDir ): try: os.makedirs( startDir ) except Exception: error = 'Can not create %s' % startDir if exitOnError: gLogger.exception( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # And need to make sure runsvdir is running result = execCommand( 0, ['ps', '-ef'] ) if not result['OK']: if exitOnError: gLogger.error( 'Failed to verify runsvdir running', result['Message'] ) DIRAC.exit( -1 ) return S_ERROR( result['Message'] ) processList = result['Value'][1].split( '\n' ) cmd = 'runsvdir %s' % startDir cmdFound = False for process in processList: if process.find( cmd ) != -1: cmdFound = True if not cmdFound: gLogger.notice( 'Starting runsvdir ...' ) os.system( "runsvdir %s 'log: DIRAC runsv' &" % startDir ) if ['Configuration', 'Server'] in setupServices and setupConfigurationMaster: # This server hosts the Master of the CS from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData gLogger.notice( 'Installing Master Configuration Server' ) cfg = __getCfg( cfgPath( 'DIRAC', 'Setups', setup ), 'Configuration', instance ) _addCfgToDiracCfg( cfg ) cfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Master' , 'yes' ) cfg.setOption( cfgPath( 'DIRAC', 'Configuration', 'Name' ) , setupConfigurationName ) serversCfgPath = cfgPath( 'DIRAC', 'Configuration', 'Servers' ) if not localCfg.getOption( serversCfgPath , [] ): serverUrl = 'dips://%s:9135/Configuration/Server' % host cfg.setOption( serversCfgPath, serverUrl ) gConfigurationData.setOptionInCFG( serversCfgPath, serverUrl ) instanceOptionPath = cfgPath( 'DIRAC', 'Setups', setup ) instanceCfg = __getCfg( instanceOptionPath, 'Configuration', instance ) cfg = cfg.mergeWith( instanceCfg ) _addCfgToDiracCfg( cfg ) result = getComponentCfg( 'service', 'Configuration', 'Server', instance, extensions, addDefaultOptions = True ) if not result['OK']: if exitOnError: DIRAC.exit( -1 ) else: return result compCfg = result['Value'] cfg = cfg.mergeWith( compCfg ) gConfigurationData.mergeWithLocal( cfg ) addDefaultOptionsToComponentCfg( 'service', 'Configuration', 'Server', [] ) if installCfg: centralCfg = _getCentralCfg( installCfg ) else: centralCfg = _getCentralCfg( localCfg ) _addCfgToLocalCS( centralCfg ) setupComponent( 'service', 'Configuration', 'Server', [], checkModule = False ) runsvctrlComponent( 'Configuration', 'Server', 't' ) while ['Configuration', 'Server'] in setupServices: setupServices.remove( ['Configuration', 'Server'] ) time.sleep( 5 ) # Now need to check if there is valid CS to register the info result = scriptCfg.enableCS() if not result['OK']: if 
exitOnError: DIRAC.exit( -1 ) return result cfgClient = CSAPI() if not cfgClient.initialize(): error = 'Configuration Server not defined' if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # We need to make sure components are connecting to the Master CS, that is the only one being update from DIRAC import gConfig localServers = localCfg.getOption( cfgPath( 'DIRAC', 'Configuration', 'Servers' ) ) masterServer = gConfig.getValue( cfgPath( 'DIRAC', 'Configuration', 'MasterServer' ), '' ) initialCfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Servers' , localServers ) masterCfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'Servers' , masterServer ) _addCfgToDiracCfg( masterCfg ) # 1.- Setup the instances in the CS # If the Configuration Server used is not the Master, it can take some time for this # info to be propagated, this may cause the later setup to fail if setupAddConfiguration: gLogger.notice( 'Registering System instances' ) for system in setupSystems: addSystemInstance( system, instance, setup, True ) for system, service in setupServices: if not addDefaultOptionsToCS( None, 'service', system, service, extensions, overwrite = True )['OK']: # If we are not allowed to write to the central CS, add the configuration to the local file addDefaultOptionsToComponentCfg( 'service', system, service, extensions ) for system, agent in setupAgents: if not addDefaultOptionsToCS( None, 'agent', system, agent, extensions, overwrite = True )['OK']: # If we are not allowed to write to the central CS, add the configuration to the local file addDefaultOptionsToComponentCfg( 'agent', system, agent, extensions ) for system, executor in setupExecutors: if not addDefaultOptionsToCS( None, 'executor', system, executor, extensions, overwrite = True )['OK']: # If we are not allowed to write to the central CS, add the configuration to the local file addDefaultOptionsToComponentCfg( 'executor', system, executor, extensions ) else: gLogger.warn( 'Configuration parameters definition is not requested' ) if ['Configuration', 'Server'] in setupServices and setupPrivateConfiguration: cfg = __getCfg( cfgPath( 'DIRAC', 'Configuration' ), 'AutoPublish' , 'no' ) _addCfgToDiracCfg( cfg ) # 2.- Check if MySQL is to be installed if installMySQLFlag: gLogger.notice( 'Installing MySQL' ) getMySQLPasswords() installMySQL() # 3.- Install requested Databases # if MySQL is not installed locally, we assume a host is given if setupDatabases: result = getDatabases() if not result['OK']: if exitOnError: gLogger.error( 'Failed to get databases', result['Message'] ) DIRAC.exit( -1 ) return result installedDatabases = result['Value'] for dbName in setupDatabases: if dbName not in installedDatabases: extension, system = installDatabase( dbName )['Value'] gLogger.notice( 'Database %s from %s/%s installed' % ( dbName, extension, system ) ) result = addDatabaseOptionsToCS( None, system, dbName, overwrite = True ) if not result['OK']: gLogger.error( 'Database %s CS registration failed: %s' % ( dbName, result['Message'] ) ) else: gLogger.notice( 'Database %s already installed' % dbName ) if mysqlPassword: if not _addMySQLToDiracCfg(): error = 'Failed to add MySQL user password to local configuration' if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # 4.- Then installed requested services for system, service in setupServices: setupComponent( 'service', system, service, extensions ) # 5.- Now the agents for system, agent in setupAgents: setupComponent( 'agent', system, agent, 
extensions ) # 6.- Now the executors for system, executor in setupExecutors: setupComponent( 'executor', system, executor, extensions ) # 7.- And finally the Portal if setupWeb: if setupWebApp: setupNewPortal() else: setupPortal() if localServers != masterServer: _addCfgToDiracCfg( initialCfg ) for system, service in setupServices: runsvctrlComponent( system, service, 't' ) for system, agent in setupAgents: runsvctrlComponent( system, agent, 't' ) for system, executor in setupExecutors: runsvctrlComponent( system, executor, 't' ) return S_OK() def _createRunitLog( runitCompDir ): controlDir = os.path.join( runitCompDir, 'control' ) os.makedirs( controlDir ) logDir = os.path.join( runitCompDir, 'log' ) os.makedirs( logDir ) logConfigFile = os.path.join( logDir, 'config' ) fd = open( logConfigFile, 'w' ) fd.write( """s10000000 n20 """ ) fd.close() logRunFile = os.path.join( logDir, 'run' ) fd = open( logRunFile, 'w' ) fd.write( """#!/bin/bash # rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec svlogd . """ % { 'bashrc' : os.path.join( instancePath, 'bashrc' ) } ) fd.close() os.chmod( logRunFile, gDefaultPerms ) def installComponent( componentType, system, component, extensions, componentModule = '', checkModule = True ): """ Install runit directory for the specified component """ # Check if the component is already installed runitCompDir = os.path.join( runitDir, system, component ) if os.path.exists( runitCompDir ): msg = "%s %s_%s already installed" % ( componentType, system, component ) gLogger.notice( msg ) return S_OK( runitCompDir ) # Check that the software for the component is installed # Any "Load" or "Module" option in the configuration defining what modules the given "component" # needs to load will be taken care of by checkComponentModule. 
if checkModule: cModule = componentModule if not cModule: cModule = component result = checkComponentModule( componentType, system, cModule ) if not result['OK']: if not checkComponentSoftware( componentType, system, cModule, extensions )['OK'] and componentType != 'executor': error = 'Software for %s %s/%s is not installed' % ( componentType, system, component ) if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) gLogger.notice( 'Installing %s %s/%s' % ( componentType, system, component ) ) # Now do the actual installation try: componentCfg = os.path.join( linkedRootPath, 'etc', '%s_%s.cfg' % ( system, component ) ) if not os.path.exists( componentCfg ): fd = open( componentCfg, 'w' ) fd.close() _createRunitLog( runitCompDir ) runFile = os.path.join( runitCompDir, 'run' ) fd = open( runFile, 'w' ) fd.write( """#!/bin/bash rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec 2>&1 # [ "%(componentType)s" = "agent" ] && renice 20 -p $$ # exec python $DIRAC/DIRAC/Core/scripts/dirac-%(componentType)s.py %(system)s/%(component)s %(componentCfg)s < /dev/null """ % {'bashrc': os.path.join( instancePath, 'bashrc' ), 'componentType': componentType, 'system' : system, 'component': component, 'componentCfg': componentCfg } ) fd.close() os.chmod( runFile, gDefaultPerms ) if componentType.lower() == 'agent': stopFile = os.path.join( runitCompDir, 'control', 't' ) fd = open( stopFile, 'w' ) fd.write( """#!/bin/bash echo %(controlDir)s/%(system)s/%(component)s/stop_agent touch %(controlDir)s/%(system)s/%(component)s/stop_agent """ % {'controlDir': runitDir, 'system' : system, 'component': component } ) fd.close() os.chmod( stopFile, gDefaultPerms ) except Exception: error = 'Failed to prepare setup for %s %s/%s' % ( componentType, system, component ) gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 5, [runFile] ) gLogger.notice( result['Value'][1] ) return S_OK( runitCompDir ) def setupComponent( componentType, system, component, extensions, componentModule = '', checkModule = True ): """ Install and create link in startup """ result = installComponent( componentType, system, component, extensions, componentModule, checkModule ) if not result['OK']: return result # Create the startup entry now runitCompDir = result['Value'] startCompDir = os.path.join( startDir, '%s_%s' % ( system, component ) ) if not os.path.exists( startDir ): os.makedirs( startDir ) if not os.path.lexists( startCompDir ): gLogger.notice( 'Creating startup link at', startCompDir ) os.symlink( runitCompDir, startCompDir ) time.sleep( 10 ) # Check the runsv status start = time.time() while ( time.time() - 20 ) < start: result = getStartupComponentStatus( [ ( system, component )] ) if not result['OK']: continue if result['Value'] and result['Value']['%s_%s' % ( system, component )]['RunitStatus'] == "Run": break time.sleep( 1 ) # Final check result = getStartupComponentStatus( [( system, component )] ) if not result['OK']: return S_ERROR( 'Failed to start the component %s_%s' % ( system, component ) ) resDict = {} resDict['ComponentType'] = componentType resDict['RunitStatus'] = result['Value']['%s_%s' % ( system, component )]['RunitStatus'] return S_OK( resDict ) def unsetupComponent( system, component ): """ Remove link from startup """ for startCompDir in glob.glob( os.path.join( startDir, '%s_%s' % ( system, component ) ) ): try: os.unlink( startCompDir ) except Exception: gLogger.exception() return S_OK() def uninstallComponent( system, 
component ): """ Remove startup and runit directories """ result = runsvctrlComponent( system, component, 'd' ) if not result['OK']: pass result = unsetupComponent( system, component ) for runitCompDir in glob.glob( os.path.join( runitDir, system, component ) ): try: shutil.rmtree( runitCompDir ) except Exception: gLogger.exception() return S_OK() def installPortal(): """ Install runit directories for the Web Portal """ # Check that the software for the Web Portal is installed error = '' webDir = os.path.join( linkedRootPath, 'Web' ) if not os.path.exists( webDir ): error = 'Web extension not installed at %s' % webDir if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) # First the lighthttpd server # Check if the component is already installed runitHttpdDir = os.path.join( runitDir, 'Web', 'httpd' ) runitPasterDir = os.path.join( runitDir, 'Web', 'paster' ) if os.path.exists( runitHttpdDir ): msg = "lighthttpd already installed" gLogger.notice( msg ) else: gLogger.notice( 'Installing Lighttpd' ) # Now do the actual installation try: _createRunitLog( runitHttpdDir ) runFile = os.path.join( runitHttpdDir, 'run' ) fd = open( runFile, 'w' ) fd.write( """#!/bin/bash rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec 2>&1 # exec lighttpdSvc.sh < /dev/null """ % {'bashrc': os.path.join( instancePath, 'bashrc' ), } ) fd.close() os.chmod( runFile, gDefaultPerms ) except Exception: error = 'Failed to prepare setup for lighttpd' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 5, [runFile] ) gLogger.notice( result['Value'][1] ) # Second the Web portal # Check if the component is already installed if os.path.exists( runitPasterDir ): msg = "Web Portal already installed" gLogger.notice( msg ) else: gLogger.notice( 'Installing Web Portal' ) # Now do the actual installation try: _createRunitLog( runitPasterDir ) runFile = os.path.join( runitPasterDir, 'run' ) fd = open( runFile, 'w' ) fd.write( """#!/bin/bash rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec 2>&1 # cd %(DIRAC)s/Web exec paster serve --reload production.ini < /dev/null """ % {'bashrc': os.path.join( instancePath, 'bashrc' ), 'DIRAC': linkedRootPath} ) fd.close() os.chmod( runFile, gDefaultPerms ) except Exception: error = 'Failed to prepare setup for Web Portal' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 5, [runFile] ) gLogger.notice( result['Value'][1] ) return S_OK( [runitHttpdDir, runitPasterDir] ) def setupPortal(): """ Install and create link in startup """ result = installPortal() if not result['OK']: return result # Create the startup entries now runitCompDir = result['Value'] startCompDir = [ os.path.join( startDir, 'Web_httpd' ), os.path.join( startDir, 'Web_paster' ) ] if not os.path.exists( startDir ): os.makedirs( startDir ) for i in range( 2 ): if not os.path.lexists( startCompDir[i] ): gLogger.notice( 'Creating startup link at', startCompDir[i] ) os.symlink( runitCompDir[i], startCompDir[i] ) time.sleep( 1 ) time.sleep( 5 ) # Check the runsv status start = time.time() while ( time.time() - 10 ) < start: result = getStartupComponentStatus( [ ( 'Web', 'httpd' ), ( 'Web', 'paster' ) ] ) if not result['OK']: return S_ERROR( 'Failed to start the Portal' ) if result['Value'] and \ result['Value']['%s_%s' % ( 'Web', 'httpd' )]['RunitStatus'] == "Run" and \ result['Value']['%s_%s' % ( 'Web', 'paster' )]['RunitStatus'] == "Run" : break time.sleep( 1 ) # Final check return 
getStartupComponentStatus( [ ( 'Web', 'httpd' ), ( 'Web', 'paster' ) ] ) def setupNewPortal(): """ Install and create link in startup """ result = installNewPortal() if not result['OK']: return result # Create the startup entries now runitCompDir = result['Value'] startCompDir = os.path.join( startDir, 'Web_WebApp' ) if not os.path.exists( startDir ): os.makedirs( startDir ) if not os.path.lexists( startCompDir ): gLogger.notice( 'Creating startup link at', startCompDir ) os.symlink( runitCompDir, startCompDir ) time.sleep( 5 ) # Check the runsv status start = time.time() while ( time.time() - 10 ) < start: result = getStartupComponentStatus( [( 'Web', 'WebApp' )] ) if not result['OK']: return S_ERROR( 'Failed to start the Portal' ) if result['Value'] and \ result['Value']['%s_%s' % ( 'Web', 'WebApp' )]['RunitStatus'] == "Run": break time.sleep( 1 ) # Final check return getStartupComponentStatus( [ ('Web', 'WebApp') ] ) def installNewPortal(): """ Install runit directories for the Web Portal """ result = execCommand( False, ["pip", "install", "tornado"] ) if not result['OK']: error = "Tornado can not be installed:%s" % result['Value'] gLogger.error( error ) DIRAC.exit(-1) return error else: gLogger.notice("Tornado is installed successfully!") # Check that the software for the Web Portal is installed error = '' webDir = os.path.join( linkedRootPath, 'WebAppDIRAC' ) if not os.path.exists( webDir ): error = 'WebApp extension not installed at %s' % webDir if exitOnError: gLogger.error( error ) DIRAC.exit( -1 ) return S_ERROR( error ) #compile the JS code prodMode = "" webappCompileScript = os.path.join( linkedRootPath, "WebAppDIRAC/scripts", "dirac-webapp-compile.py" ) if os.path.isfile( webappCompileScript ): os.chmod( webappCompileScript , gDefaultPerms ) gLogger.notice( "Executing %s..." % webappCompileScript ) if os.system( "python '%s' > '%s.out' 2> '%s.err'" % ( webappCompileScript, webappCompileScript, webappCompileScript ) ): gLogger.error( "Compile script %s failed. 
Check %s.err" % ( webappCompileScript, webappCompileScript ) ) else: prodMode = "-p" # Check if the component is already installed runitWebAppDir = os.path.join( runitDir, 'Web', 'WebApp' ) # Check if the component is already installed if os.path.exists( runitWebAppDir ): msg = "Web Portal already installed" gLogger.notice( msg ) else: gLogger.notice( 'Installing Web Portal' ) # Now do the actual installation try: _createRunitLog( runitWebAppDir ) runFile = os.path.join( runitWebAppDir, 'run' ) fd = open( runFile, 'w' ) fd.write( """#!/bin/bash rcfile=%(bashrc)s [ -e $rcfile ] && source $rcfile # exec 2>&1 # exec python %(DIRAC)s/WebAppDIRAC/scripts/dirac-webapp-run.py %(prodMode)s < /dev/null """ % {'bashrc': os.path.join( instancePath, 'bashrc' ), 'DIRAC': linkedRootPath, 'prodMode':prodMode} ) fd.close() os.chmod( runFile, gDefaultPerms ) except Exception: error = 'Failed to prepare setup for Web Portal' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execCommand( 5, [runFile] ) gLogger.notice( result['Value'][1] ) return S_OK( runitWebAppDir ) def fixMySQLScripts( startupScript = mysqlStartupScript ): """ Edit MySQL scripts to point to desired locations for db and my.cnf """ gLogger.verbose( 'Updating:', startupScript ) try: fd = open( startupScript, 'r' ) orgLines = fd.readlines() fd.close() fd = open( startupScript, 'w' ) for line in orgLines: if line.find( 'export HOME' ) == 0: continue if line.find( 'datadir=' ) == 0: line = 'datadir=%s\n' % mysqlDbDir gLogger.debug( line ) line += 'export HOME=%s\n' % mysqlDir if line.find( 'basedir=' ) == 0: platform = getPlatformString() line = 'basedir=%s\n' % os.path.join( rootPath, platform ) if line.find( 'extra_args=' ) == 0: line = 'extra_args="-n"\n' if line.find( '$bindir/mysqld_safe --' ) >= 0 and not ' --no-defaults ' in line: line = line.replace( 'mysqld_safe', 'mysqld_safe --no-defaults' ) fd.write( line ) fd.close() except Exception: error = 'Failed to Update MySQL startup script' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) return S_OK() def mysqlInstalled( doNotExit = False ): """ Check if MySQL is already installed """ if os.path.exists( mysqlDbDir ) or os.path.exists( mysqlLogDir ): return S_OK() if doNotExit: return S_ERROR() error = 'MySQL not properly Installed' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) def getMySQLPasswords(): """ Get MySQL passwords from local configuration or prompt """ import getpass global mysqlRootPwd, mysqlPassword if not mysqlRootPwd: mysqlRootPwd = getpass.getpass( 'MySQL root password: ' ) if not mysqlPassword: # Take it if it is already defined mysqlPassword = localCfg.getOption( '/Systems/Databases/Password', '' ) if not mysqlPassword: mysqlPassword = getpass.getpass( 'MySQL Dirac password: ' ) return S_OK() def setMySQLPasswords( root = '', dirac = '' ): """ Set MySQL passwords """ global mysqlRootPwd, mysqlPassword if root: mysqlRootPwd = root if dirac: mysqlPassword = dirac return S_OK() def startMySQL(): """ Start MySQL server """ result = mysqlInstalled() if not result['OK']: return result return execCommand( 0, [mysqlStartupScript, 'start'] ) def stopMySQL(): """ Stop MySQL server """ result = mysqlInstalled() if not result['OK']: return result return execCommand( 0, [mysqlStartupScript, 'stop'] ) def installMySQL(): """ Attempt an installation of MySQL mode: -Master -Slave -None """ fixMySQLScripts() if mysqlInstalled( doNotExit = True )['OK']: gLogger.notice( 'MySQL 
already installed' ) return S_OK() if mysqlMode.lower() not in [ '', 'master', 'slave' ]: error = 'Unknown MySQL server Mode' if exitOnError: gLogger.fatal( error, mysqlMode ) DIRAC.exit( -1 ) gLogger.error( error, mysqlMode ) return S_ERROR( error ) if mysqlHost: gLogger.notice( 'Installing MySQL server at', mysqlHost ) if mysqlMode: gLogger.notice( 'This is a MySQl %s server' % mysqlMode ) try: os.makedirs( mysqlDbDir ) os.makedirs( mysqlLogDir ) except Exception: error = 'Can not create MySQL dirs' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) try: fd = open( mysqlMyOrg, 'r' ) myOrg = fd.readlines() fd.close() fd = open( mysqlMyCnf, 'w' ) for line in myOrg: if line.find( '[mysqld]' ) == 0: line += '\n'.join( [ 'innodb_file_per_table', '' ] ) elif line.find( 'innodb_log_arch_dir' ) == 0: line = '' elif line.find( 'innodb_data_file_path' ) == 0: line = line.replace( '2000M', '200M' ) elif line.find( 'server-id' ) == 0 and mysqlMode.lower() == 'master': # MySQL Configuration for Master Server line = '\n'.join( ['server-id = 1', '# DIRAC Master-Server', 'sync-binlog = 1', 'replicate-ignore-table = mysql.MonitorData', '# replicate-ignore-db=db_name', 'log-bin = mysql-bin', 'log-slave-updates', '' ] ) elif line.find( 'server-id' ) == 0 and mysqlMode.lower() == 'slave': # MySQL Configuration for Slave Server line = '\n'.join( ['server-id = %s' % int( time.time() ), '# DIRAC Slave-Server', 'sync-binlog = 1', 'replicate-ignore-table = mysql.MonitorData', '# replicate-ignore-db=db_name', 'log-bin = mysql-bin', 'log-slave-updates', '' ] ) elif line.find( '/opt/dirac/mysql' ) > -1: line = line.replace( '/opt/dirac/mysql', mysqlDir ) if mysqlSmallMem: if line.find( 'innodb_buffer_pool_size' ) == 0: line = 'innodb_buffer_pool_size = 200M\n' elif mysqlLargeMem: if line.find( 'innodb_buffer_pool_size' ) == 0: line = 'innodb_buffer_pool_size = 10G\n' fd.write( line ) fd.close() except Exception: error = 'Can not create my.cnf' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) gLogger.notice( 'Initializing MySQL...' ) result = execCommand( 0, ['mysql_install_db', '--defaults-file=%s' % mysqlMyCnf, '--datadir=%s' % mysqlDbDir ] ) if not result['OK']: return result gLogger.notice( 'Starting MySQL...' 
) result = startMySQL() if not result['OK']: return result gLogger.notice( 'Setting MySQL root password' ) result = execCommand( 0, ['mysqladmin', '-u', mysqlRootUser, 'password', mysqlRootPwd] ) if not result['OK']: return result # MySQL tends to define root@host user rather than root@host.domain hostName = mysqlHost.split('.')[0] result = execMySQL( "UPDATE user SET Host='%s' WHERE Host='%s'" % ( mysqlHost, hostName ), localhost=True ) if not result['OK']: return result result = execMySQL( "FLUSH PRIVILEGES" ) if not result['OK']: return result if mysqlHost and socket.gethostbyname( mysqlHost ) != '127.0.0.1' : result = execCommand( 0, ['mysqladmin', '-u', mysqlRootUser, '-h', mysqlHost, 'password', mysqlRootPwd] ) if not result['OK']: return result result = execMySQL( "DELETE from user WHERE Password=''", localhost=True ) if not _addMySQLToDiracCfg(): return S_ERROR( 'Failed to add MySQL user password to local configuration' ) return S_OK() def getMySQLStatus(): """ Get the status of the MySQL database installation """ result = execCommand( 0, ['mysqladmin', 'status' ] ) if not result['OK']: return result output = result['Value'][1] _d1, uptime, nthreads, nquestions, nslow, nopens, nflash, nopen, nqpersec = output.split( ':' ) resDict = {} resDict['UpTime'] = uptime.strip().split()[0] resDict['NumberOfThreads'] = nthreads.strip().split()[0] resDict['NumberOfQuestions'] = nquestions.strip().split()[0] resDict['NumberOfSlowQueries'] = nslow.strip().split()[0] resDict['NumberOfOpens'] = nopens.strip().split()[0] resDict['OpenTables'] = nopen.strip().split()[0] resDict['FlushTables'] = nflash.strip().split()[0] resDict['QueriesPerSecond'] = nqpersec.strip().split()[0] return S_OK( resDict ) def getAvailableDatabases( extensions ): dbDict = {} for extension in extensions + ['']: databases = glob.glob( os.path.join( rootPath, '%sDIRAC' % extension, '*', 'DB', '*.sql' ) ) for dbPath in databases: dbName = os.path.basename( dbPath ).replace( '.sql', '' ) dbDict[dbName] = {} dbDict[dbName]['Extension'] = extension dbDict[dbName]['System'] = dbPath.split( '/' )[-3].replace( 'System', '' ) return S_OK( dbDict ) def getDatabases(): """ Get the list of installed databases """ result = execMySQL( 'SHOW DATABASES' ) if not result['OK']: return result dbList = [] for dbName in result['Value']: if not dbName[0] in ['Database', 'information_schema', 'mysql', 'test']: dbList.append( dbName[0] ) return S_OK( dbList ) def installDatabase( dbName ): """ Install requested DB in MySQL server """ global mysqlRootPwd, mysqlPassword if not mysqlRootPwd: rootPwdPath = cfgInstallPath( 'Database', 'RootPwd' ) return S_ERROR( 'Missing %s in %s' % ( rootPwdPath, cfgFile ) ) if not mysqlPassword: mysqlPassword = localCfg.getOption( cfgPath( 'Systems', 'Databases', 'Password' ), mysqlPassword ) if not mysqlPassword: mysqlPwdPath = cfgPath( 'Systems', 'Databases', 'Password' ) return S_ERROR( 'Missing %s in %s' % ( mysqlPwdPath, cfgFile ) ) gLogger.notice( 'Installing', dbName ) dbFile = glob.glob( os.path.join( rootPath, '*', '*', 'DB', '%s.sql' % dbName ) ) if not dbFile: error = 'Database %s not found' % dbName gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) dbFile = dbFile[0] # just check result = execMySQL( 'SHOW STATUS' ) if not result['OK']: error = 'Could not connect to MySQL server' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) # now creating the Database result = execMySQL( 'CREATE DATABASE `%s`' % dbName ) if not result['OK']: gLogger.error( 
'Failed to create database', result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return result perms = "SELECT,INSERT,LOCK TABLES,UPDATE,DELETE,CREATE,DROP,ALTER,CREATE VIEW, SHOW VIEW" for cmd in ["GRANT %s ON `%s`.* TO '%s'@'localhost' IDENTIFIED BY '%s'" % ( perms, dbName, mysqlUser, mysqlPassword ), "GRANT %s ON `%s`.* TO '%s'@'%s' IDENTIFIED BY '%s'" % ( perms, dbName, mysqlUser, mysqlHost, mysqlPassword ), "GRANT %s ON `%s`.* TO '%s'@'%%' IDENTIFIED BY '%s'" % ( perms, dbName, mysqlUser, mysqlPassword ) ]: result = execMySQL( cmd ) if not result['OK']: error = "Error executing '%s'" % cmd gLogger.error( error, result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) result = execMySQL( 'FLUSH PRIVILEGES' ) if not result['OK']: gLogger.error( 'Failed to flush privileges', result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return result # first get the lines to be executed, then execute them try: cmdLines = _createMySQLCMDLines( dbFile ) # We need to run one SQL cmd at once, mysql is much happier that way. # Create a string of commands, ignoring comment lines sqlString = '\n'.join( x for x in cmdLines if not x.startswith( "--" ) ) # Now run each command (they are separated by ;) # Ignore any empty ones cmds = [ x.strip() for x in sqlString.split( ";" ) if x.strip() ] for cmd in cmds: result = execMySQL( cmd, dbName ) if not result['OK']: error = 'Failed to initialize Database' gLogger.notice( cmd ) gLogger.error( error, result['Message'] ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) except Exception, e: gLogger.error( str( e ) ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( str( e ) ) return S_OK( dbFile.split( '/' )[-4:-2] ) def _createMySQLCMDLines( dbFile ): """ Creates a list of MySQL commands to be executed, inspecting the dbFile(s) """ cmdLines = [] fd = open( dbFile ) dbLines = fd.readlines() fd.close() for line in dbLines: # Should we first source an SQL file (is this sql file an extension)?
if line.lower().startswith('source'): sourcedDBbFileName = line.split( ' ' )[1].replace( '\n', '' ) gLogger.info( "Found file to source: %s" % sourcedDBbFileName ) sourcedDBbFile = os.path.join( rootPath, sourcedDBbFileName ) fdSourced = open( sourcedDBbFile ) dbLinesSourced = fdSourced.readlines() fdSourced.close() for lineSourced in dbLinesSourced: if lineSourced.strip(): cmdLines.append( lineSourced.strip() ) # Creating/adding cmdLines else: if line.strip(): cmdLines.append( line.strip() ) return cmdLines def execMySQL( cmd, dbName = 'mysql', localhost=False ): """ Execute MySQL Command """ global db from DIRAC.Core.Utilities.MySQL import MySQL if not mysqlRootPwd: return S_ERROR( 'MySQL root password is not defined' ) if dbName not in db: dbHost = mysqlHost if localhost: dbHost = 'localhost' db[dbName] = MySQL( dbHost, mysqlRootUser, mysqlRootPwd, dbName, mysqlPort ) if not db[dbName]._connected: error = 'Could not connect to MySQL server' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) return db[dbName]._query( cmd ) def _addMySQLToDiracCfg(): """ Add the database access info to the local configuration """ if not mysqlPassword: return S_ERROR( 'Missing %s in %s' % ( cfgInstallPath( 'Database', 'Password' ), cfgFile ) ) sectionPath = cfgPath( 'Systems', 'Databases' ) cfg = __getCfg( sectionPath, 'User', mysqlUser ) cfg.setOption( cfgPath( sectionPath, 'Password' ), mysqlPassword ) return _addCfgToDiracCfg( cfg ) def configureCE( ceName = '', ceType = '', cfg = None, currentSectionPath = '' ): """ Produce new dirac.cfg including configuration for new CE """ from DIRAC.Resources.Computing.ComputingElementFactory import ComputingElementFactory from DIRAC import gConfig cesCfg = ResourcesDefaults.getComputingElementDefaults( ceName, ceType, cfg, currentSectionPath ) ceNameList = cesCfg.listSections() if not ceNameList: error = 'No CE Name provided' gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) for ceName in ceNameList: if 'CEType' not in cesCfg[ceName]: error = 'Missing Type for CE "%s"' % ceName gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) localsiteCfg = localCfg['LocalSite'] # Replace Configuration under LocalSite with new Configuration for ceName in ceNameList: if localsiteCfg.existsKey( ceName ): gLogger.notice( ' Removing existing CE:', ceName ) localsiteCfg.deleteKey( ceName ) gLogger.notice( 'Configuring CE:', ceName ) localsiteCfg.createNewSection( ceName, contents = cesCfg[ceName] ) # Apply configuration and try to instantiate the CEs gConfig.loadCFG( localCfg ) for ceName in ceNameList: ceFactory = ComputingElementFactory() try: ceInstance = ceFactory.getCE( ceType, ceName ) except Exception: error = 'Fail to instantiate CE' gLogger.exception( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) if not ceInstance['OK']: error = 'Fail to instantiate CE: %s' % ceInstance['Message'] gLogger.error( error ) if exitOnError: DIRAC.exit( -1 ) return S_ERROR( error ) # Everything is OK, we can save the new cfg localCfg.writeToFile( cfgFile ) gLogger.always( 'LocalSite section in %s has been uptdated with new configuration:' % os.path.basename( cfgFile ) ) gLogger.always( str( localCfg['LocalSite'] ) ) return S_OK( ceNameList ) def configureLocalDirector( ceNameList = '' ): """ Install a Local DIRAC TaskQueueDirector, basically write the proper configuration file """ if ceNameList: result = setupComponent( 'agent', 'WorkloadManagement', 'TaskQueueDirector', [] ) if not 
result['OK']: return result # Now write a local Configuration for the Director directorCfg = CFG() directorCfg.addKey( 'SubmitPools', 'DIRAC', 'Added by InstallTools' ) directorCfg.addKey( 'DefaultSubmitPools', 'DIRAC', 'Added by InstallTools' ) directorCfg.addKey( 'ComputingElements', ', '.join( ceNameList ), 'Added by InstallTools' ) result = addCfgToComponentCfg( 'agent', 'WorkloadManagement', 'TaskQueueDirector', directorCfg ) if not result['OK']: return result return runsvctrlComponent( 'WorkloadManagement', 'TaskQueueDirector', 't' ) def execCommand( timeout, cmd ): """ Execute command tuple and handle Error cases """ result = systemCall( timeout, cmd ) if not result['OK']: if timeout and result['Message'].find( 'Timeout' ) == 0: return result gLogger.error( 'Failed to execute', '%s: %s' % ( cmd[0], result['Message'] ) ) if exitOnError: DIRAC.exit( -1 ) return result if result['Value'][0]: error = 'Failed to execute' gLogger.error( error, cmd[0] ) gLogger.error( 'Exit code:' , ( '%s\n' % result['Value'][0] ) + '\n'.join( result['Value'][1:] ) ) if exitOnError: DIRAC.exit( -1 ) error = S_ERROR( error ) error['Value'] = result['Value'] return error gLogger.verbose( result['Value'][1] ) return result
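# Usage sketch, assuming a configured DIRAC installation: every helper above
# follows the DIRAC return convention, a dict with an 'OK' key and either a
# 'Value' or a 'Message'. A minimal driver for one component would therefore
# look roughly like this; the system/component names are hypothetical examples.
#
#   result = setupComponent( 'agent', 'WorkloadManagement', 'JobCleaningAgent', [] )
#   if not result['OK']:
#     gLogger.error( 'Setup failed', result['Message'] )
#   else:
#     gLogger.notice( 'Runit status: %s' % result['Value']['RunitStatus'] )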
coberger/DIRAC
Core/Utilities/InstallTools.py
Python
gpl-3.0
88,582
[ "DIRAC" ]
e24981597d8a7bab675ef25a9d6a7dd06087efd407c63cc9ad5ede1a625fc795
# Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" **************************************** espressopp.interaction.DihedralPotential **************************************** This is an abstract class; it is only meant to be inherited from. .. function:: espressopp.interaction.DihedralPotential.computeEnergy(\*args) :param \*args: a single dihedral angle (in radians) or a vector convertible with ``toReal3DFromVector`` :type \*args: float or vector :rtype: float .. function:: espressopp.interaction.DihedralPotential.computeForce(\*args) :param \*args: a single dihedral angle (in radians) or a vector convertible with ``toReal3DFromVector`` :type \*args: float or vector :rtype: float or Real3D """ # -*- coding: iso-8859-1 -*- from espressopp import pmi from espressopp import toReal3DFromVector from _espressopp import interaction_DihedralPotential # Python base class for dihedral potentials class DihedralPotentialLocal(object): def computeEnergy(self, *args): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): if len(args) == 1: # in case the dihedral angle is passed directly arg0 = args[0] if isinstance(arg0, float) or isinstance(arg0, int): return self.cxxclass.computeEnergy(self, arg0) return self.cxxclass.computeEnergy(self, toReal3DFromVector(*args)) def computeForce(self, *args): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): if len(args) == 1: # in case theta is passed arg0 = args[0] if isinstance(arg0, float) or isinstance(arg0, int): return self.cxxclass.computeForce(self, arg0) return self.cxxclass.computeForce(self, toReal3DFromVector(*args)) if pmi.isController: class DihedralPotential(object): __metaclass__ = pmi.Proxy pmiproxydefs = dict( localcall = [ 'computeForce', 'computeEnergy' ], pmiproperty = [ 'cutoff' ] )
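# Usage sketch, assuming a concrete subclass such as
# espressopp.interaction.DihedralHarmonic is available (it is referenced by
# the gromacs topology helpers elsewhere in this repository): because
# computeEnergy() above dispatches on its argument type, the potential can be
# evaluated directly from a dihedral angle given in radians.
#
#   pot = espressopp.interaction.DihedralHarmonic(K=1.0, phi0=0.0)
#   energy = pot.computeEnergy(0.5)   # scalar phi goes straight to cxxclass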
fedepad/espressopp
src/interaction/DihedralPotential.py
Python
gpl-3.0
2,660
[ "ESPResSo" ]
0ec9fca69530bad13c1269603e4518e90366847be472390555add67eb9233156
""" SkCode AST tree builder code. """ from collections import defaultdict from gettext import gettext as _ from .etree import ( RootTreeNode, TreeNode ) from .tags import ( DEFAULT_RECOGNIZED_TAGS_LIST, build_recognized_tags_dict, NewlineTreeNode, TextTreeNode ) from .tokenizer import ( tokenize_tag, TOKEN_DATA, TOKEN_NEWLINE, TOKEN_OPEN_TAG, TOKEN_CLOSE_TAG, TOKEN_SELF_CLOSE_TAG ) def parse_skcode(text: str, recognized_tags=DEFAULT_RECOGNIZED_TAGS_LIST, opening_tag_ch='[', closing_tag_ch=']', allow_tagvalue_attr=True, allow_self_closing_tags=True, root_node_cls=RootTreeNode, text_node_cls=TextTreeNode, newline_node_cls=NewlineTreeNode, mark_unclosed_tags_as_erroneous=False, max_nesting_depth=16, cls_options_overload=None): """ Parse the given text as a BBCode formatted document. Return the resulting document tree (DOM-like parser). :param text: The input text to be parsed. :param recognized_tags: A list containing all valid tag classes. :type recognized_tags: iterable[TreeNode] :param opening_tag_ch: The opening tag char (must be one char long exactly, default '['). :param closing_tag_ch: The closing tag char (must be one char long exactly, default ']'). :param allow_tagvalue_attr: Set to ``True`` to allow the BBCode ``tagname=tagvalue`` syntax shortcut (default is ``True``). :param allow_self_closing_tags: Set to ``True`` to allow the self closing tags syntax (default is ``True``). :param root_node_cls: The tree node class for the root node. :param text_node_cls: The tree node class for all normal text nodes. :param newline_node_cls: The tree node class for all newlines. :param mark_unclosed_tags_as_erroneous: If set to ``True``, unclosed tags will be mark as erroneous (default is ``False``). :param max_nesting_depth: The maximum nesting depth (default to 16). Set to zero to disable (not recommended because a Denial-Of-Service is possible if nesting depth is not limited). :param cls_options_overload: Dictionary of dictionaries mapped by node class type ``{class: {key : value}}`` to be used to overload node options settings on a per node class basis. This allow simple tweak of a default class behaviour at runtime. :type cls_options_overload: dict[TreeNode, dict[str, Any]] :return The resulting document tree at the end of the parsing stage. """ assert opening_tag_ch, "The opening tag character is mandatory." assert len(opening_tag_ch) == 1, "Opening tag character must be one char long exactly." assert closing_tag_ch, "The closing tag character is mandatory." assert len(closing_tag_ch) == 1, "Closing tag character must be one char long exactly." assert root_node_cls, "Root tree node class is mandatory." assert text_node_cls, "Text tree node class is mandatory." assert newline_node_cls, "Newline tree node class is mandatory." assert max_nesting_depth >= 0, "Maximum nesting depth must be greater or equal than zero." 
# Build the known tag names dictionary recognized_tags = build_recognized_tags_dict(recognized_tags) # Build the overload options dictionary extra_cls_kwargs = defaultdict(dict) if cls_options_overload: extra_cls_kwargs.update(cls_options_overload) # Initialize the parser root_tree_node = cur_tree_node = root_node_cls() cur_nesting_depth = 0 # Cleanup text to avoid parsing useless whitespaces text = text.strip() if not text: return root_tree_node # Tokenize the input text for token in tokenize_tag(text, opening_tag_ch, closing_tag_ch, allow_tagvalue_attr, allow_self_closing_tags): # Unpack the token token_type, tag_name, tag_attrs, token_source = token # Handle DATA block if not cur_tree_node.parse_embedded and (token_type != TOKEN_CLOSE_TAG or tag_name != cur_tree_node.name): # Append the raw source to the node until closing tag found cur_tree_node.content += token_source continue # The ``if`` below must be an ``if`` and not an ``elif`` because we need to parse # the closing tag of the DATA block when received. # Handle unrecognized tags if tag_name is not None and tag_name not in recognized_tags: # Turn the token into raw data cur_tree_node.new_child(None, text_node_cls, source_open_tag=token_source, error_message=_('Unknown tag name')) # SAX-like tree building algorithm elif token_type == TOKEN_DATA: # Append to the current node cur_tree_node.new_child(None, text_node_cls, content=token_source) elif token_type == TOKEN_NEWLINE: # Handle newline_closes option # Loop to handle the case when nested tag need to be closed at once while cur_tree_node.newline_closes and cur_tree_node.parent is not None: cur_tree_node = cur_tree_node.parent # Append to the current node cur_tree_node.new_child(None, newline_node_cls) elif token_type == TOKEN_OPEN_TAG: # Handle nesting depth limit if max_nesting_depth and cur_nesting_depth >= max_nesting_depth: # Tag cannot be open, fallback as erroneous text cur_tree_node.new_child(None, text_node_cls, source_open_tag=token_source, error_message=_('Nesting depth limit reached')) # End of processing for this tag continue # Load tag options tag_cls = recognized_tags[tag_name] # Handle same_tag_closes option if cur_tree_node.same_tag_closes \ and isinstance(cur_tree_node, tag_cls) \ and cur_tree_node.parent is not None: cur_tree_node = cur_tree_node.parent # Handle close_inlines if tag_cls.close_inlines: while cur_tree_node.inline and cur_tree_node.parent is not None: cur_tree_node = cur_tree_node.parent # Create a new child node new_node = cur_tree_node.new_child(tag_name, tag_cls, attrs=tag_attrs, source_open_tag=token_source, **extra_cls_kwargs[tag_cls]) # Jump to the new child node if not standalone if not tag_cls.standalone: cur_tree_node = new_node # Update nesting depth limit cur_nesting_depth += 1 elif token_type == TOKEN_CLOSE_TAG: # Check if current node can be closed if cur_tree_node.parent is None or cur_tree_node.name != tag_name: # Look for the parent to close depth = 0 cursor = cur_tree_node while cursor.parent is not None and ( (cursor.weak_parent_close and cursor.parent.name != tag_name) or cursor.parent.name == tag_name): depth += 1 cursor = cursor.parent # Handle weak parent close option if cursor.name == tag_name: # Close all traversal tree nodes cur_tree_node = cursor # Also close the parent node cur_tree_node.source_close_tag = token_source cur_tree_node = cur_tree_node.parent # Update nesting depth limit cur_nesting_depth -= depth else: # Tag cannot be closed, fallback as erroneous text cur_tree_node.new_child(None, text_node_cls, 
source_close_tag=token_source, error_message=_('Unexpected closing tag')) else: # Close the current tree node cur_tree_node.source_close_tag = token_source cur_tree_node = cur_tree_node.parent # Update nesting depth limit if cur_nesting_depth: cur_nesting_depth -= 1 elif token_type == TOKEN_SELF_CLOSE_TAG: # Load tag options tag_cls = recognized_tags[tag_name] # Detect erroneous self closing tag if not tag_cls.standalone: # Erroneous tag, fallback as erroneous text cur_tree_node.new_child(None, text_node_cls, source_open_tag=token_source, error_message=_('Unexpected self closing tag')) else: # Create a new child node cur_tree_node.new_child(tag_name, tag_cls, attrs=tag_attrs, source_open_tag=token_source, **extra_cls_kwargs[tag_cls]) # Close all remaining weak nodes while cur_tree_node != root_tree_node and cur_tree_node.parent is not None and cur_tree_node.weak_parent_close: cur_tree_node = cur_tree_node.parent # Mark unclosed tags as erroneous if mark_unclosed_tags_as_erroneous: while cur_tree_node != root_tree_node and cur_tree_node.parent is not None: cur_tree_node.error_message = _('Unclosed tag') cur_tree_node = cur_tree_node.parent # Perform sanity check pre_process_tree(root_tree_node) sanitize_tree(root_tree_node) post_process_tree(root_tree_node) # Return the resulting AST return root_tree_node def pre_process_tree(tree_node: TreeNode): """ Recursive method for pre-processing the given tree node and children recursively. :param tree_node: The tree node to be pre-processed. """ # Pre-process the node tree_node.pre_process_node() # Go down the tree for child_node in tree_node.children: pre_process_tree(child_node) def sanitize_tree(tree_node: TreeNode, breadcrumb=None): """ Recursive method for sanitizing the given tree node and children recursively. :param tree_node: The tree node to be sanitized. :param breadcrumb: The current breadcrumb of parent nodes (default to an empty list). :type breadcrumb: list or None """ breadcrumb = breadcrumb or [] # Down to top visit order (depth-first algorithm) with breadcrumb sub_breadcrumb = [] if tree_node.is_root else [tree_node] for child_node in tree_node.children: sanitize_tree(child_node, breadcrumb + sub_breadcrumb) # Sanitize the node tree_node.sanitize_node(breadcrumb) def post_process_tree(tree_node: TreeNode): """ Recursive method for post-processing the given tree node and children recursively. :param tree_node: The tree node to be post-processed. """ # Post-process the node tree_node.post_process_node() # Go down the tree for child_node in tree_node.children: post_process_tree(child_node)
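# Usage sketch (assuming the default tag set defines a ``b`` tag): parse a
# small snippet and walk the resulting tree. The ``name`` and ``content``
# attributes are the TreeNode fields already used by the builder above.
#
#   root = parse_skcode('[b]hello[/b] world')
#   for node in root.children:
#       print(node.name, repr(node.content))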
TamiaLab/PySkCode
skcode/treebuilder.py
Python
agpl-3.0
11,686
[ "VisIt" ]
e0b3bb05f3648eaf6444541a7921d8faec87c060c17fa8d04c8d6bb7f9a16d1e
# $HeadURL: $ ''' DowntimeCommand module ''' import urllib2 from datetime import datetime, timedelta from DIRAC import gLogger, S_OK, S_ERROR from DIRAC.Core.LCG.GOCDBClient import GOCDBClient from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getGOCSiteName, Resources from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient from DIRAC.ResourceStatusSystem.Command.Command import Command from DIRAC.ResourceStatusSystem.Utilities import CSHelpers __RCSID__ = '$Id: $' class DowntimeCommand( Command ): ''' Downtime "master" Command. ''' def __init__( self, args = None, clients = None ): super( DowntimeCommand, self ).__init__( args, clients ) self.resources = Resources() if 'GOCDBClient' in self.apis: self.gClient = self.apis[ 'GOCDBClient' ] else: self.gClient = GOCDBClient() if 'ResourceManagementClient' in self.apis: self.rmClient = self.apis[ 'ResourceManagementClient' ] else: self.rmClient = ResourceManagementClient() def _storeCommand( self, result ): ''' Stores the results of doNew method on the database. ''' for dt in result: resQuery = self.rmClient.addOrModifyDowntimeCache( dt[ 'DowntimeID' ], dt[ 'Element' ], dt[ 'Name' ], dt[ 'StartDate' ], dt[ 'EndDate' ], dt[ 'Severity' ], dt[ 'Description' ], dt[ 'Link' ] ) if not resQuery[ 'OK' ]: return resQuery return S_OK() def _prepareCommand( self ): ''' DowntimeCommand requires three arguments: - name : <str> - element : Site / Resource - elementType: <str> If the elements are Site(s), we need to get their GOCDB names. They may not have, so we ignore them if they do not have. ''' if 'name' not in self.args: return S_ERROR( '"name" not found in self.args' ) elementName = self.args[ 'name' ] if 'element' not in self.args: return S_ERROR( '"element" not found in self.args' ) element = self.args[ 'element' ] if 'elementType' not in self.args: return S_ERROR( '"elementType" not found in self.args' ) elementType = self.args[ 'elementType' ] if not element in [ 'Site', 'Resource' ]: return S_ERROR( 'element is not Site nor Resource' ) hours = None if 'hours' in self.args: hours = self.args[ 'hours' ] # Transform DIRAC site names into GOCDB topics if element == 'Site': gocSite = getGOCSiteName( elementName ) if not gocSite[ 'OK' ]: return gocSite elementName = gocSite[ 'Value' ] # The DIRAC se names mean nothing on the grid, but their hosts do mean. elif elementType == 'StorageElement': result = CSHelpers.getSEProtocolOption( elementName, 'Host' ) if not result['OK']: return S_ERROR( 'No seHost for %s' % elementName ) elementName = result['Value'] return S_OK( ( element, elementName, hours ) ) def doNew( self, masterParams = None ): ''' Gets the parameters to run, either from the master method or from its own arguments. For every elementName, unless it is given a list, in which case it contacts the gocdb client. The server is not very stable, so in case of failure tries a second time. If there are downtimes, are recorded and then returned. ''' if masterParams is not None: element, elementNames = masterParams hours = None elementName = None else: params = self._prepareCommand() if not params[ 'OK' ]: return params element, elementName, hours = params[ 'Value' ] elementNames = [ elementName ] startDate = datetime.utcnow() - timedelta( days = 14 ) try: results = self.gClient.getStatus( element, elementName, startDate, 120 ) except urllib2.URLError: try: #Let's give it a second chance.. 
results = self.gClient.getStatus( element, elementName, startDate, 120 ) except urllib2.URLError, e: return S_ERROR( e ) if not results[ 'OK' ]: return results results = results[ 'Value' ] if results is None: return S_OK( None ) uniformResult = [] # Humanize the results into a dictionary, not the most optimal, but readable for downtime, downDic in results.items(): dt = {} if element == 'Resource': dt[ 'Name' ] = downDic[ 'HOSTNAME' ] else: dt[ 'Name' ] = downDic[ 'SITENAME' ] if not dt[ 'Name' ] in elementNames: continue dt[ 'DowntimeID' ] = downtime dt[ 'Element' ] = element dt[ 'StartDate' ] = downDic[ 'FORMATED_START_DATE' ] dt[ 'EndDate' ] = downDic[ 'FORMATED_END_DATE' ] dt[ 'Severity' ] = downDic[ 'SEVERITY' ] dt[ 'Description' ] = downDic[ 'DESCRIPTION' ].replace( '\'', '' ) dt[ 'Link' ] = downDic[ 'GOCDB_PORTAL_URL' ] uniformResult.append( dt ) storeRes = self._storeCommand( uniformResult ) if not storeRes[ 'OK' ]: return storeRes # We return only one downtime, if it's ongoing at dtDate startDate = datetime.utcnow() endDate = startDate if hours: startDate = startDate + timedelta( hours = hours ) result = None for dt in uniformResult: if ( dt[ 'StartDate' ] < str( startDate ) ) and ( dt[ 'EndDate' ] > str( endDate ) ): result = dt #We want to take the latest one ( they are sorted by insertion time ) #break return S_OK( result ) def doCache( self ): ''' Method that tries to read from the cache table. It will return a list of dictionaries if there are results. ''' params = self._prepareCommand() if not params[ 'OK' ]: return params element, elementName, hours = params[ 'Value' ] result = self.rmClient.selectDowntimeCache( element = element, name = elementName ) if not result[ 'OK' ]: return result uniformResult = [ dict( zip( result[ 'Columns' ], res ) ) for res in result[ 'Value' ] ] # We return only one downtime, if it's ongoing at dtDate dtDate = datetime.utcnow() result = None if not hours: # If no hours are defined, we want the downtimes running now, which means, # the ones that already started and will finish later. for dt in uniformResult: if ( dt[ 'StartDate' ] < dtDate ) and ( dt[ 'EndDate' ] > dtDate ): result = dt break else: # If hours are defined, we want the downtimes starting in the next <hours> dtDateFuture = dtDate + timedelta( hours = hours ) for dt in uniformResult: if ( dt[ 'StartDate' ] > dtDate ) and ( dt[ 'StartDate' ] < dtDateFuture ): result = dt #We want to take the latest one ( they are sorted by insertion time ) #break return S_OK( result ) def doMaster( self ): ''' Master method, which looks a little bit like spaghetti code, sorry! - It gets all sites and transforms them into gocSites. - It gets all the storage elements and transforms them into their hosts - It gets the fts, the ces and file catalogs.
''' gocSites = CSHelpers.getGOCSites() if not gocSites[ 'OK' ]: return gocSites gocSites = gocSites[ 'Value' ] sesHosts = CSHelpers.getStorageElementsHosts() if not sesHosts[ 'OK' ]: return sesHosts sesHosts = sesHosts[ 'Value' ] resources = sesHosts # # #FIXME: file catalogs need also to use their hosts # something similar applies to FTS Channels # #fts = CSHelpers.getFTS() #if fts[ 'OK' ]: # resources = resources + fts[ 'Value' ] #fc = CSHelpers.getFileCatalogs() #if fc[ 'OK' ]: # resources = resources + fc[ 'Value' ] ce = self.resources.getEligibleResources( 'Computing' ) if ce[ 'OK' ]: resources = resources + ce[ 'Value' ] gLogger.verbose( 'Processing Sites: %s' % ', '.join( gocSites ) ) siteRes = self.doNew( ( 'Site', gocSites ) ) if not siteRes[ 'OK' ]: self.metrics[ 'failed' ].append( siteRes[ 'Message' ] ) gLogger.verbose( 'Processing Resources: %s' % ', '.join( resources ) ) resourceRes = self.doNew( ( 'Resource', resources ) ) if not resourceRes[ 'OK' ]: self.metrics[ 'failed' ].append( resourceRes[ 'Message' ] ) return S_OK( self.metrics ) ################################################################################ #EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
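# Usage sketch, invoking the command directly instead of through the RSS
# policy machinery; the site name below is a hypothetical example. doCache()
# only reads the local DowntimeCache table, so it is the cheapest entry point.
#
#   command = DowntimeCommand( { 'name' : 'LCG.CERN.ch',
#                                'element' : 'Site',
#                                'elementType' : 'Site' } )
#   result = command.doCache()
#   if result[ 'OK' ] and result[ 'Value' ]:
#     print result[ 'Value' ][ 'Severity' ], result[ 'Value' ][ 'EndDate' ]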
avedaee/DIRAC
ResourceStatusSystem/Command/DowntimeCommand.py
Python
gpl-3.0
9,464
[ "DIRAC" ]
4b9ada2d69deb40594d27dde052f0851ddfea8c7b020f6e0edc510cbac39f83f
import types from DIRAC.Core.Utilities import Time class DBUtils: def __init__( self, db, setup ): self._acDB = db self._setup = setup def _retrieveBucketedData( self, typeName, startTime, endTime, selectFields, condDict = None, groupFields = None, orderFields = None ): """ Get data from the DB Parameters: - typeName -> typeName - startTime & endTime -> datetime objects. Do I need to explain the meaning? - selectFields -> tuple containing a string and a list of fields: ( "SUM(%s), %s/%s", ( "field1name", "field2name", "field3name" ) ) - condDict -> conditions for the query key -> name of the key field value -> list of possible values - groupFields -> list of fields to group by, can be in form ( "%s, %s", ( "field1name", "field2name", "field3name" ) ) - orderFields -> list of fields to order by, can be in form ( "%s, %s", ( "field1name", "field2name", "field3name" ) """ typeName = "%s_%s" % ( self._setup, typeName ) validCondDict = {} if type( condDict ) == types.DictType: for key in condDict: if type( condDict[ key ] ) in ( types.ListType, types.TupleType ) and len( condDict[ key ] ) > 0: validCondDict[ key ] = condDict[ key ] retVal = self._acDB._getConnection() if not retVal[ 'OK' ]: return retVal connObj = retVal[ 'Value' ] return self._acDB.retrieveBucketedData( typeName, startTime, endTime, selectFields, condDict, groupFields, orderFields, connObj = connObj ) def _getUniqueValues( self, typeName, startTime, endTime, condDict, fieldList ): stringList = [ "%s" for field in fieldList ] return self._retrieveBucketedData( typeName, startTime, endTime, ( ",".join( stringList ), fieldList ), condDict, fieldList ) def _groupByField( self, fieldIndex, dataList ): """ From a list of lists/tuples group them into a dict of lists using as key field fieldIndex """ groupDict = {} for row in dataList: groupingField = row[ fieldIndex ] if not groupingField in groupDict: groupDict[ groupingField ] = [] if type( row ) == types.TupleType: rowL = list( row[ :fieldIndex ] ) rowL.extend( row[ fieldIndex + 1: ] ) row = rowL else: del( row[ fieldIndex ] ) groupDict[ groupingField ].append( row ) return groupDict def _getBins( self, typeName, startTime, endTime ): typeName = "%s_%s" % ( self._setup, typeName ) return self._acDB.calculateBuckets( typeName, startTime, endTime ) def _getBucketLengthForTime( self, typeName, momentEpoch ): nowEpoch = Time.toEpoch() typeName = "%s_%s" % ( self._setup, typeName ) return self._acDB.calculateBucketLengthForTime( typeName, nowEpoch, momentEpoch ) def _spanToGranularity( self, granularity, bucketsData ): """ bucketsData must be a list of lists where each list contains - field 0: datetime - field 1: bucketLength - fields 2-n: numericalFields """ normData = {} def addToNormData( bucketDate, data, proportion = 1.0 ): if bucketDate in normData: for iP in range( len( data ) ): val = data[ iP ] if val == None: val = 0 normData[ bucketDate ][iP] += float( val ) * proportion normData[ bucketDate ][ -1 ] += proportion else: normData[ bucketDate ] = [] for fD in data: if fD == None: fD = 0 normData[ bucketDate ].append( float( fD ) * proportion ) normData[ bucketDate ].append( proportion ) for bucketData in bucketsData: bucketDate = bucketData[0] originalBucketLength = bucketData[1] bucketValues = bucketData[2:] if originalBucketLength == granularity: addToNormData( bucketDate, bucketValues ) else: startEpoch = bucketDate endEpoch = bucketDate + originalBucketLength newBucketEpoch = startEpoch - startEpoch % granularity if startEpoch == endEpoch: addToNormData( newBucketEpoch, 
bucketValues ) else: while newBucketEpoch < endEpoch: start = max( newBucketEpoch, startEpoch ) end = min( newBucketEpoch + granularity, endEpoch ) proportion = float( end - start ) / originalBucketLength addToNormData( newBucketEpoch, bucketValues, proportion ) newBucketEpoch += granularity return normData def _sumToGranularity( self, granularity, bucketsData ): """ bucketsData must be a list of lists where each list contains - field 0: datetime - field 1: bucketLength - fields 2-n: numericalFields """ normData = self._spanToGranularity( granularity, bucketsData ) for bDate in normData: del( normData[ bDate ][-1] ) return normData def _averageToGranularity( self, granularity, bucketsData ): """ bucketsData must be a list of lists where each list contains - field 0: datetime - field 1: bucketLength - fields 2-n: numericalFields """ normData = self._spanToGranularity( granularity, bucketsData ) for bDate in normData: for iP in range( len( normData[ bDate ] ) ): normData[ bDate ][iP] = float( normData[ bDate ][iP] ) / normData[ bDate ][-1] del( normData[ bDate ][-1] ) return normData def _convertNoneToZero( self, bucketsData ): """ Convert None to 0 bucketsData must be a list of lists where each list contains - field 0: datetime - field 1: bucketLength - fields 2-n: numericalFields """ for iPos in range( len( bucketsData ) ): data = bucketsData[iPos] for iVal in range( 2, len( data ) ): if data[ iVal ] == None: data[ iVal ] = 0 return bucketsData def _fillWithZero( self, granularity, startEpoch, endEpoch, dataDict ): """ Fill missing buckets with zeros - dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. } """ startBucketEpoch = startEpoch - startEpoch % granularity for key in dataDict: currentDict = dataDict[ key ] for timeEpoch in range( startBucketEpoch, endEpoch, granularity ): if timeEpoch not in currentDict: currentDict[ timeEpoch ] = 0 return dataDict def _getAccumulationMaxValue( self, dataDict ): """ Sum the values of all keys at the latest time bucket and return that total - dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. } """ maxValue = 0 maxEpoch = 0 for key in dataDict: currentDict = dataDict[ key ] for timeEpoch in currentDict: if timeEpoch > maxEpoch: maxEpoch = timeEpoch maxValue = 0 if timeEpoch == maxEpoch: maxValue += currentDict[ timeEpoch ] return maxValue def _getMaxValue( self, dataDict ): """ Sum the values of all keys per time bucket and return the maximum total - dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. } """ maxValues = {} for key in dataDict: currentDict = dataDict[ key ] for timeEpoch in currentDict: if timeEpoch not in maxValues: maxValues[ timeEpoch ] = 0 maxValues[ timeEpoch ] += currentDict[ timeEpoch ] maxValue = 0 for k in maxValues: maxValue = max( maxValue, maxValues[ k ] ) return maxValue def _divideByFactor( self, dataDict, factor ): """ Divide the values by the given factor and return the maximum resulting value - dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'.. } """ maxValue = 0.0 for key in dataDict: currentDict = dataDict[ key ] for timeEpoch in currentDict: currentDict[ timeEpoch ] /= float( factor ) maxValue = max( maxValue, currentDict[ timeEpoch ] ) return dataDict, maxValue def _accumulate( self, granularity, startEpoch, endEpoch, dataDict ): """ Accumulate all the values. - dataDict = { 'key' : { time1 : value, time2 : value... }, 'key2'..
} """ startBucketEpoch = startEpoch - startEpoch % granularity for key in dataDict: currentDict = dataDict[ key ] lastValue = 0 for timeEpoch in range( startBucketEpoch, endEpoch, granularity ): if timeEpoch in currentDict: lastValue += currentDict[ timeEpoch ] currentDict[ timeEpoch ] = lastValue return dataDict def stripDataField( self, dataDict, fieldId ): """ Strip <fieldId> data and sum the rest as it was data from one key In: - dataDict : { 'key' : { <timeEpoch1>: [1, 2, 3], <timeEpoch2>: [3, 4, 5].. } } - fieldId : 0 Out - dataDict : { 'key' : { <timeEpoch1>: 1, <timeEpoch2>: 3.. } } - return : [ { <timeEpoch1>: 2, <timeEpoch2>: 4... } { <timeEpoch1>: 3, <timeEpoch2>): 5... } ] """ remainingData = [{}] #Hack for empty data for key in dataDict: for timestamp in dataDict[ key ]: for iPos in dataDict[ key ][ timestamp ]: remainingData.append( {} ) break break for key in dataDict: for timestamp in dataDict[ key ]: strippedField = float( dataDict[ key ][ timestamp ][ fieldId ] ) del( dataDict[ key ][ timestamp ][ fieldId ] ) for iPos in range( len( dataDict[ key ][ timestamp ] ) ): if timestamp in remainingData[ iPos ]: remainingData[ iPos ][ timestamp ] += float( dataDict[ key ][ timestamp ][ iPos ] ) else: remainingData[ iPos ][ timestamp ] = float( dataDict[ key ][ timestamp ][ iPos ] ) dataDict[ key ][ timestamp ] = strippedField return remainingData def getKeyValues( self, typeName, condDict ): """ Get all valid key values in a type """ retVal = self._acDB._getConnection() if not retVal[ 'OK' ]: return retVal connObj = retVal[ 'Value' ] typeName = "%s_%s" % ( self._setup, typeName ) return self._acDB.getKeyValues( typeName, condDict, connObj ) def _calculateProportionalGauges( self, dataDict ): """ Get a dict with more than one entry per bucket and list """ bucketSums = {} #Calculate total sums in buckets for key in dataDict: for timeKey in dataDict[ key ]: timeData = dataDict[ key ][ timeKey ] if len( timeData ) < 2: raise Exception( "DataDict must be of the type { <key>:{ <timeKey> : [ field1, field2, ..] } }. With at least two fields" ) if timeKey not in bucketSums: bucketSums[ timeKey ] = [ 0, 0, 0] bucketSums[ timeKey ][0] += timeData[0] bucketSums[ timeKey ][1] += timeData[1] bucketSums[ timeKey ][2] += timeData[0] / timeData[1] #Calculate proportionalFactor for timeKey in bucketSums: timeData = bucketSums[ timeKey ] if bucketSums[ timeKey ][0] == 0: bucketSums[ timeKey ] = 0 else: bucketSums[ timeKey ] = ( timeData[0] / timeData[1] ) / timeData[2] #Calculate proportional Gauges for key in dataDict: for timeKey in dataDict[ key ]: timeData = dataDict[ key ][ timeKey ] dataDict[ key ][ timeKey ] = [ ( timeData[0] / timeData[1] ) * bucketSums[ timeKey ] ] return dataDict def _getBucketTotals( self, dataDict ): """ Sum key data and get totals for each bucket """ newData = {} for k in dataDict: for bt in dataDict[ k ]: if bt not in newData: newData[ bt ] = 0.0 newData[ bt ] += dataDict[ k ][ bt ] return newData
avedaee/DIRAC
AccountingSystem/private/DBUtils.py
Python
gpl-3.0
12,101
[ "DIRAC" ]
d3969d2fde62c13279762b11388c59daa7454a128753c306ee814f3fc60cc12d
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at https://mozilla.org/MPL/2.0/. """ This input field is prepared to receive sisl objects that are plotables """ from pathlib import Path import sisl from sisl import BaseSile from sisl.physics import distribution from .._input_field import InputField from .basic import FloatInput, IntegerInput, OptionsInput, TextInput, DictInput, BoolInput from .file import FilePathInput from .queries import QueriesInput if not hasattr(BaseSile, "to_json"): # Little patch so that Siles can be sent to the GUI def sile_to_json(self): return str(self.file) BaseSile.to_json = sile_to_json forced_keys = { sisl.Geometry: 'geometry', sisl.Hamiltonian: 'H', sisl.BandStructure: 'band_structure', sisl.BrillouinZone: 'brillouin_zone', sisl.Grid: 'grid', sisl.EigenstateElectron: 'eigenstate', } class SislObjectInput(InputField): _type = "sisl_object" def __init__(self, key, *args, **kwargs): super().__init__(key, *args, **kwargs) if self.dtype is None: raise ValueError(f'Please provide a dtype for {key}') valid_key = forced_keys.get(self.dtype, None) if valid_key is not None and not key.endswith(valid_key): raise ValueError( f'Invalid key ("{key}") for an input that accepts {kwargs["dtype"]}, please use {valid_key}' 'to help keeping consistency across sisl and therefore make the world a better place.' f'If there are multiple settings that accept {kwargs["dtype"]}, please use *_{valid_key}' ) class GeometryInput(SislObjectInput): dtype = (sisl.Geometry, "sile (or path to file) that contains a geometry") _dtype = (str, sisl.Geometry, *sisl.get_siles(attrs=['read_geometry'])) def parse(self, val): if isinstance(val, (str, Path)): val = sisl.get_sile(val) if isinstance(val, sisl.io.BaseSile): val = val.read_geometry() return val class HamiltonianInput(SislObjectInput): pass class BandStructureInput(QueriesInput, SislObjectInput): dtype = sisl.BandStructure def __init__(self, *args, **kwargs): kwargs["help"] = """A band structure. it can either be provided as a sisl.BandStructure object or as a list of points, which will be parsed into a band structure object. """ # Let's define the queryform. Each query will be a point of the path. kwargs["queryForm"] = [ FloatInput( key="x", name="X", default=0, params={ "step": 0.01 } ), FloatInput( key="y", name="Y", default=0, params={ "step": 0.01 } ), FloatInput( key="z", name="Z", default=0, params={ "step": 0.01 } ), IntegerInput( key="divisions", name="Divisions", default=50, params={ "min": 0, "step": 10 } ), TextInput( key="name", name="Name", default=None, params = { "placeholder": "Name..." }, help = "Tick that should be displayed at this corner of the path." ), BoolInput( key="jump", name="Jump", default=False, help="""If True, this point just signals a discontinuity and the rest of inputs for this point will be ignored. """ ), ] super().__init__(*args, **kwargs) def parse(self, val): if not isinstance(val, sisl.BandStructure) and val is not None: # Then let's parse the list of points into a band structure object. # Use only those points that are active. 
val = [point for point in val if point.get("active", True)] points = [] divisions = [] names = [] # Loop over all points and construct the inputs for BandStructure for i_point, point in enumerate(val): if point.get("jump") is True: # This is a discontinuity points.append(None) if i_point > 0: divisions.append(1) else: # This is an actual point in the band structure. points.append( [point.get("x", None) or 0, point.get("y", None) or 0, point.get("z", None) or 0] ) names.append(point.get("name", "")) if i_point > 0: divisions.append(int(point["divisions"])) val = sisl.BandStructure(None, points=points, divisions=divisions, names=names) return val class BrillouinZoneInput(SislObjectInput): pass class GridInput(SislObjectInput): pass class EigenstateElectronInput(SislObjectInput): pass class PlotableInput(SislObjectInput): _type = "plotable" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) class DistributionInput(DictInput, SislObjectInput): def __init__(self, *args, **kwargs): # Define the fields of the distribution (we reuse the DictInput field machinery for convenience) kwargs["fields"] = [ OptionsInput( key="method", name="Method", default="gaussian", params={ "options": [{"label": dist, "value": dist} for dist in distribution.__all__ if dist != "get_distribution"], "isMulti": False, "isClearable": False, } ), FloatInput( key="smearing", name="Smearing", default=0.1, params={ "step": 0.01 } ), FloatInput( key="x0", name="Center", default=0.0, params={ "step": 0.01 } ), ] super().__init__(*args, **kwargs) def parse(self, val): if val and not callable(val): if isinstance(val, str): val = distribution.get_distribution(val) else: val = distribution.get_distribution(**self.complete_dict(val)) return val class SileInput(FilePathInput, SislObjectInput): def __init__(self, *args, required_attrs=None, **kwargs): if required_attrs: self._required_attrs = required_attrs kwargs["dtype"] = None super().__init__(*args, **kwargs) def _get_dtype(self): """ This is a temporary fix because for some reason some sile classes cannot be pickled """ if hasattr(self, "_required_attrs"): return tuple(sisl.get_siles(attrs=self._required_attrs)) else: return self.__dict__["dtype"] def _set_dtype(self, val): self.__dict__["dtype"] = val dtype = property(fget=_get_dtype, fset=_set_dtype, )
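# Usage sketch of the list-of-points form accepted by BandStructureInput.parse;
# keys mirror the query form defined above and the corner names are
# illustrative. The parent of the resulting sisl.BandStructure is attached
# later (``None`` inside parse).
#
#   field = BandStructureInput(key="band_structure")
#   bs = field.parse([
#       {"x": 0.0, "y": 0.0, "z": 0.0, "divisions": 50, "name": "Gamma"},
#       {"x": 0.5, "y": 0.0, "z": 0.0, "divisions": 50, "name": "X"},
#   ])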
zerothi/sisl
sisl/viz/input_fields/sisl_obj.py
Python
mpl-2.0
7,638
[ "Gaussian" ]
07560893088e47452aadb845925da26d7a8117c0bc34739826b892c21417d7bc
# Copyright (C) 2015 # Jakub Krajniak (jkrajniak at gmail.com) # Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Some helper classes useful when parsing the gromacs topology """ *************** topology_helper *************** """ import espressopp import math import gromacs import os class FileBuffer(): def __init__(self): self.linecount=0 self.lines=[] self.pos=0 def appendline(self, line): self.lines.append(line) def readline(self): try: line=self.lines[self.pos] except: return '' self.pos+=1 return line def readlastline(self): try: line=self.lines[self.pos-1] except: return '' return line def seek(self, p): self.pos=p def tell(self): return self.pos def FillFileBuffer(fname, filebuffer): f=open(fname, 'r') for line in f: if "include" in line and not line[0]==';': name=(line.split()[1]).strip('\"') try: FillFileBuffer(name, filebuffer) except IOError: #need to use relative path name = os.path.join(os.path.dirname(fname), name) FillFileBuffer(name, filebuffer) else: l=line.rstrip('\n') if l: filebuffer.appendline(l) f.close() return def FindType(proposedtype, typelist): list=[typeid for (typeid,atype) in typelist.iteritems() if atype==proposedtype ] if len(list)>1: print "Error: duplicate type definitions", proposedtype.parameters exit() elif len(list)==0: return None return list[0] class InteractionType: def __init__(self, parameters): self.parameters=parameters def __eq__(self,other): # interaction types are defined to be equal if all parameters are equal for k, v in self.parameters.iteritems(): if k not in other.parameters: return False if other.parameters[k]!=v: return False return True def createEspressoInteraction(self, system, fpl): print "WARNING: could not set up interaction for", self.parameters, ": Espresso potential not implemented" return None def automaticExclusion(self): #overwrite in derived class if the particular interaction is automatically excluded return False class HarmonicBondedInteractionType(InteractionType): def createEspressoInteraction(self, system, fpl): # interaction specific stuff here # spring constant kb is half the gromacs spring constant pot = espressopp.interaction.Harmonic(self.parameters['kb']/2.0, self.parameters['b0']) interb = espressopp.interaction.FixedPairListHarmonic(system, fpl, pot) return interb def automaticExclusion(self): return True class MorseBondedInteractionType(InteractionType): def createEspressoInteraction(self, system, fpl): # interaction specific stuff here pot = espressopp.interaction.Morse(self.parameters['D'], self.parameters['beta'], self.parameters['rmin']) interb = espressopp.interaction.FixedPairListMorse(system, fpl, pot) return interb def automaticExclusion(self): return True class FENEBondedInteractionType(InteractionType): def
createEspressoInteraction(self, system, fpl): # interaction specific stuff here # spring constant kb is half the gromacs spring constant pot = espressopp.interaction.Fene(self.parameters['kb']/2.0, self.parameters['b0']) interb = espressopp.interaction.FixedPairListFene(system, fpl, pot) return interb def automaticExclusion(self): return True class HarmonicAngleInteractionType(InteractionType): def createEspressoInteraction(self, system, fpl): # interaction specific stuff here # spring constant kb is half the gromacs spring constant. Also convert deg to rad pot = espressopp.interaction.AngularHarmonic(self.parameters['k']/2.0, self.parameters['theta']*2*math.pi/360) interb = espressopp.interaction.FixedTripleListAngularHarmonic(system, fpl, pot) return interb class TabulatedBondInteractionType(InteractionType): def createEspressoInteraction(self, system, fpl): spline = 3 fg = "table_b"+str(self.parameters['tablenr'])+".xvg" fe = fg.split(".")[0]+".tab" # name of espressopp file gromacs.convertTable(fg, fe) potTab = espressopp.interaction.Tabulated(itype=spline, filename=fe) interb = espressopp.interaction.FixedPairListTabulated(system, fpl, potTab) return interb def automaticExclusion(self): return self.parameters['excl'] class TabulatedAngleInteractionType(InteractionType): def createEspressoInteraction(self, system, fpl): spline = 3 fg = "table_a"+str(self.parameters['tablenr'])+".xvg" fe = fg.split(".")[0]+".tab" # name of espressopp file gromacs.convertTable(fg, fe) potTab = espressopp.interaction.TabulatedAngular(itype=spline, filename=fe) interb = espressopp.interaction.FixedTripleListTabulatedAngular(system, fpl, potTab) return interb class TabulatedDihedralInteractionType(InteractionType): def createEspressoInteraction(self, system, fpl): spline = 3 fg = "table_d"+str(self.parameters['tablenr'])+".xvg" fe = fg.split(".")[0]+".tab" # name of espressopp file gromacs.convertTable(fg, fe) potTab = espressopp.interaction.TabulatedDihedral(itype=spline, filename=fe) interb = espressopp.interaction.FixedQuadrupleListTabulatedDihedral(system, fpl, potTab) return interb class HarmonicNCosDihedralInteractionType(InteractionType): def createEspressoInteraction(self, system, fpl): # DihedralHarmonicNCos coded such that k = gromacs spring constant. 
Convert degrees to rad
        pot = espressopp.interaction.DihedralHarmonicNCos(self.parameters['K'], self.parameters['phi0']*2*math.pi/360, self.parameters['multiplicity'])
        interb = espressopp.interaction.FixedQuadrupleListDihedralHarmonicNCos(system, fpl, pot)
        return interb

    def automaticExclusion(self):
        return True


class RyckaertBellemansDihedralInteractionType(InteractionType):
    def createEspressoInteraction(self, system, fpl):
        print('RyckaertBellemans: {}'.format(self.parameters))
        pot = espressopp.interaction.DihedralRB(**self.parameters)
        return espressopp.interaction.FixedQuadrupleListDihedralRB(system, fpl, pot)


class HarmonicDihedralInteractionType(InteractionType):
    def createEspressoInteraction(self, system, fpl):
        pot = espressopp.interaction.DihedralHarmonic(self.parameters['K'], self.parameters['phi0']*2*math.pi/360)
        return espressopp.interaction.FixedQuadrupleListDihedralHarmonic(system, fpl, pot)


def ParseBondTypeParam(line):
    tmp = line.split()
    btype = tmp[2]
    # TODO: handle exclusions automatically
    if btype == "8":
        p = TabulatedBondInteractionType({"tablenr": int(tmp[3]), "k": float(tmp[4]), 'excl': True})
    elif btype == "9":
        p = TabulatedBondInteractionType({"tablenr": int(tmp[3]), "k": float(tmp[4]), 'excl': False})
    elif btype == "1":
        p = HarmonicBondedInteractionType({"b0": float(tmp[3]), "kb": float(tmp[4])})
    elif btype == "3":
        p = MorseBondedInteractionType({"b0": float(tmp[3]), "D": float(tmp[4]), "beta": float(tmp[5])})
    elif btype == "7":
        p = FENEBondedInteractionType({"b0": float(tmp[3]), "kb": float(tmp[4])})
    else:
        print "Unsupported bond type", tmp[2], "in line:"
        print line
        exit()
    return p


def ParseAngleTypeParam(line):
    tmp = line.split()
    angletype = int(tmp[3])
    if angletype == 1:
        p = HarmonicAngleInteractionType({"theta": float(tmp[4]), "k": float(tmp[5])})
    elif angletype == 8:
        p = TabulatedAngleInteractionType({"tablenr": int(tmp[4]), "k": float(tmp[5])})
    else:
        print "Unsupported angle type", angletype, "in line:"
        print line
        exit()
    return p


def ParseDihedralTypeParam(line):
    tmp = line.split()
    dihedraltype = int(tmp[4])
    if dihedraltype == 8:
        p = TabulatedDihedralInteractionType({"tablenr": int(tmp[5]), "k": float(tmp[6])})
    elif dihedraltype == 3:
        tmp[5:11] = map(float, tmp[5:11])
        p = RyckaertBellemansDihedralInteractionType(
            {'K0': tmp[5], 'K1': tmp[6], 'K2': tmp[7], 'K3': tmp[8], 'K4': tmp[9], 'K5': tmp[10]}
        )
    elif (dihedraltype == 1) or (dihedraltype == 9):
        p = HarmonicNCosDihedralInteractionType({"K": float(tmp[6]), "phi0": float(tmp[5]), "multiplicity": int(tmp[7])})
    else:
        print "Unsupported dihedral type", dihedraltype, "in line:"
        print line
        exit()
    return p


def ParseImproperTypeParam(line):
    tmp = line.split()
    impropertype = int(tmp[4])
    if impropertype == 4:
        p = HarmonicNCosDihedralInteractionType({"K": float(tmp[6]), "phi0": float(tmp[5]), "multiplicity": int(tmp[7])})
    elif impropertype == 2:
        p = HarmonicDihedralInteractionType({"K": float(tmp[6]), "phi0": float(tmp[5])})
    else:
        print "Unsupported improper type", impropertype, "in line:"
        print line
        exit()
    return p


# Useful code for generating the regular exclusions
class Node():
    def __init__(self, id):
        self.id = id
        self.neighbours = []

    def addNeighbour(self, nb):
        self.neighbours.append(nb)


def FindNodeById(id, nodes):
    matches = [n for n in nodes if n.id == id]
    if len(matches) > 1:
        print "Error: duplicate nodes", id
        exit()
    elif len(matches) == 0:
        return None
    return matches[0]


def FindNNextNeighbours(startnode, numberNeighbours, neighbours, forbiddenNodes):
    if numberNeighbours == 0:
        return neighbours
    # avoid going back along the same path
    forbiddenNodes.append(startnode)

    # Loop over next neighbours and add them to the neighbours list
    # Recursively call the function with numberNeighbours-1
    for n in startnode.neighbours:
        if not n in forbiddenNodes:
            if n not in neighbours:
                neighbours.append(n)  # avoid double counting in rings
            FindNNextNeighbours(n, numberNeighbours-1, neighbours, forbiddenNodes)


def GenerateRegularExclusions(bonds, nrexcl, exclusions):
    nodes = []

    # make a Node object for each atom involved in bonds
    for b in bonds:
        bids = b[0:2]
        for i in bids:
            if FindNodeById(i, nodes) == None:
                n = Node(i)
                nodes.append(n)

    # find the next neighbours for each node and append them
    for b in bonds:
        permutations = [(b[0], b[1]), (b[1], b[0])]
        for p in permutations:
            n = FindNodeById(p[0], nodes)
            nn = FindNodeById(p[1], nodes)
            n.addNeighbour(nn)

    # for each atom, call the FindNNextNeighbours function, which recursively
    # searches for nrexcl next neighbours
    for n in nodes:
        neighbours = []
        FindNNextNeighbours(n, nrexcl, neighbours, forbiddenNodes=[])
        for nb in neighbours:
            # check if the permutation is already in the exclusion list
            # this may be slow, but to do it in every MD step is even slower...
            # TODO: find a clever algorithm which avoids permutations from the start
            if not (n.id, nb.id) in exclusions:
                if not (nb.id, n.id) in exclusions:
                    exclusions.append((n.id, nb.id))

    return exclusions
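
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original parser; assumes ESPResSo++
# and the helpers above are importable): the exclusion generator on a
# four-bead linear chain with nrexcl=2 excludes first and second bonded
# neighbours, but not the 1-4 pair, which is three bonds away.
if __name__ == '__main__':
    chain_bonds = [(1, 2), (2, 3), (3, 4)]
    excl = GenerateRegularExclusions(chain_bonds, nrexcl=2, exclusions=[])
    # expected pairs (up to ordering): (1,2), (1,3), (2,3), (2,4), (3,4)
    print sorted(excl)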
kkreis/espressopp
src/tools/topology_helper.py
Python
gpl-3.0
12,558
[ "ESPResSo", "Gromacs" ]
bb5b4868df24d36e81c967a4195278284551bdb6c0b5284ef5b4c335f4049539
import ast import contextlib import io import os.path import types import pytest import ipytest from ipytest._impl import RewriteAssertTransformer @pytest.mark.parametrize( "spec", [ # any key that maps to true is expected to be removed by clean_tests {"test": True, "foo": False}, {"test_clean": True, "foo": False}, {"Test": True, "hello": False}, {"TestClass": True, "world": False}, {"Test_Class": True, "world": False}, {"teST": False, "bar": False}, {"TEst_Class": False, "world": False}, {"_test_clean": False, "foo": False}, {"_Test_Class": False, "world": False}, ], ) def test_clean(spec): expected = {k: v for k, v in spec.items() if not v} actual = spec.copy() ipytest.clean_tests(items=actual) assert actual == expected def test_reprs(): assert repr(ipytest._config.keep) == "<keep>" assert repr(ipytest._config.default) == "<default>" def fake_module(__name__, **items): mod = types.ModuleType(__name__) for k, v in items.items(): setattr(mod, k, v) return mod def test_fixtures(): @pytest.fixture def my_fixture(): return 42 def test_example(my_fixture): assert my_fixture == 42 ipytest.run( module=fake_module( __name__="empty_module", __file__=os.path.join(os.path.dirname(__file__), "empty_module.py"), my_fixture=my_fixture, test_example=test_example, ), ) assert ipytest.exit_code == 0 def test_parametrize(): @pytest.mark.parametrize("val", [0, 2, 4, 8, 10]) def test_example(val): assert val % 2 == 0 ipytest.run( module=fake_module( __name__="empty_module", __file__=os.path.join(os.path.dirname(__file__), "empty_module.py"), test_example=test_example, ), ) assert ipytest.exit_code == 0 def test_rewrite_assert_transformer_runs(): with open(__file__, "rt") as fobj: source = fobj.read() node = ast.parse(source) RewriteAssertTransformer().visit(node) def test_program_name(): with io.StringIO() as fobj, contextlib.redirect_stderr(fobj): ipytest.run("--foo") res = fobj.getvalue() assert "error" in res assert "%%ipytest" in res assert "ipykernel_launcher.py" not in res
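

def test_failing_test_sets_nonzero_exit_code():
    # Sketch beyond the original suite (same fixtures and helpers as above):
    # a deliberately failing test should leave a non-zero exit code,
    # mirroring the passing cases that assert ``ipytest.exit_code == 0``.
    def test_example():
        assert False

    ipytest.run(
        module=fake_module(
            __name__="empty_module",
            __file__=os.path.join(os.path.dirname(__file__), "empty_module.py"),
            test_example=test_example,
        ),
    )
    assert ipytest.exit_code != 0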
chmp/ipytest
tests/test_ipytest.py
Python
mit
2,393
[ "VisIt" ]
d5bda60b37bf5a8cf731dd3834c83044f4931fcb75923a9b9a0c0e4ddf5f5e5d
Blocker = object()
Start = False
End = object()

class Path(dict):
    @classmethod
    def determine_path(cls, field, width, height):
        path = cls()
        path.width = width
        path.height = height
        path.ends = set()
        path.starts = []
        cells = []
        for y in range(height):
            for x in range(width):
                cell = field[x, y]
                if cell is End:
                    path[x, y] = 0
                    cells.append((x, y))
                    path.ends.add((x, y))
                elif cell is Blocker:
                    path[x, y] = Blocker
                elif cell is Start:
                    path.starts.append((x, y))

        #path.dump()

        x_extent = path.width-1
        y_extent = path.height-1
        while cells:
            new = []
            for x, y in cells:
                v = path[x, y]
                if x > 0 and (x-1, y) not in path:
                    path[x-1, y] = v + 1
                    new.append((x-1, y))
                if x < x_extent and (x+1, y) not in path:
                    path[x+1, y] = v + 1
                    new.append((x+1, y))
                if y > 0 and (x, y-1) not in path:
                    path[x, y-1] = v + 1
                    new.append((x, y-1))
                if y < y_extent and (x, y+1) not in path:
                    path[x, y+1] = v + 1
                    new.append((x, y+1))
            cells = new

        for k in list(path.keys()):
            if path[k] is Blocker or path[k] is None:
                del path[k]

        return path

    def dump(self, mods={}):
        import sys
        for y in range(self.height):
            sys.stdout.write('%02d '%y)
            for x in range(self.width):
                p = (x, y)
                if p in mods:
                    c = '*'
                else:
                    c = self.get(p)
                    if c is None:
                        c = '.'
                    elif c is Blocker:
                        c = '#'
                    else:
                        c = chr(c + ord('0'))
                sys.stdout.write(c)
            print
        print
        print ' ',
        for i in range(self.width):
            sys.stdout.write('%d'%(i%10))
        print

    def get_neighbors(self, x, y):
        '''May move horizontally, vertically or diagonally; a diagonal
        move is allowed only if neither of the two cells adjacent to the
        diagonal is blocked.
        '''
        l = []
        # top left
        if x > 0 and y < self.height-1:
            tx = x - 1; ty = y + 1
            if (x, ty) in self and (tx, y) in self and (tx, ty) in self:
                l.append((self[tx, ty], (tx, ty)))
        # top right
        if x < self.width-1 and y < self.height-1:
            tx = x + 1; ty = y + 1
            if (x, ty) in self and (tx, y) in self and (tx, ty) in self:
                l.append((self[tx, ty], (tx, ty)))
        # bottom left
        if x > 0 and y > 0:
            tx = x - 1; ty = y - 1
            if (x, ty) in self and (tx, y) in self and (tx, ty) in self:
                l.append((self[tx, ty], (tx, ty)))
        # bottom right
        if x < self.width-1 and y > 0:
            tx = x + 1; ty = y - 1
            if (x, ty) in self and (tx, y) in self and (tx, ty) in self:
                l.append((self[tx, ty], (tx, ty)))
        # left
        if x > 0:
            tx = x - 1
            if (tx, y) in self:
                l.append((self[tx, y], (tx, y)))
        # right
        if x < self.width-1:
            tx = x + 1
            if (tx, y) in self:
                l.append((self[tx, y], (tx, y)))
        # bottom
        if y > 0:
            ty = y - 1
            if (x, ty) in self:
                l.append((self[x, ty], (x, ty)))
        # top
        if y < self.height-1:
            ty = y + 1
            if (x, ty) in self:
                l.append((self[x, ty], (x, ty)))
        l.sort()
        return l

    def next_step(self, x, y):
        return self.get_neighbors(x, y)[0][1]

    def test_mod(self, set_cells):
        '''Determine whether the map would still be solvable if the cells
        provided were blocked.
''' set_cells = set(set_cells) current = self.starts visited = set() while current: visited |= set(current) #print 'TRY', current #print 'VISITED', visited next = set() for x, y in current: options = self.get_neighbors(x, y) options.reverse() #print 'VISIT', (x, y), options while options: c = options.pop() p = c[1] if p in self.ends: #print 'END', p return True if p not in set_cells and p not in visited: next.add(p) break current = list(next) return False if __name__ == '__main__': field_cells = ''' ++++SSS+++++ +####.#####+ +#........#+ S#.......##E S..........E S#.......##E +#..###...#+ +####.#####+ ++++EEE+++++ '''.strip() field_rows = [line.strip() for line in field_cells.splitlines()] height = len(field_rows) width = len(field_rows[0]) play_field = {} for y, line in enumerate(field_rows): for x, cell in enumerate(line): if cell == '#': content = Blocker else: if cell == 'E': content = End elif cell == 'S': content = Start elif cell == '+': content = Blocker else: content = None play_field[x*2, y*2] = content play_field[x*2+1, y*2] = content play_field[x*2, y*2+1] = content play_field[x*2+1, y*2+1] = content path = Path.determine_path(play_field, width*2, height*2) path.dump() print path.get_neighbors(7, 13) print 'TEST BLOCKING MODS' path.dump(set(((18, 8), (19, 8), (18, 9), (19, 9)))) assert path.test_mod(()) == True assert path.test_mod(((18, 8), (19, 8), (18, 9), (19, 9))) == False assert path.test_mod(((0, 8), (0, 8), (1, 9), (1, 9))) == True
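
    # Sketch beyond the original demo: greedily walk the distance field from
    # the first start cell, always stepping to the neighbour closest to an
    # end; the BFS values strictly decrease, so the walk ends on an end cell.
    pos = path.starts[0]
    steps = 0
    while pos not in path.ends:
        pos = path.next_step(*pos)
        steps += 1
    print 'reached end %r from %r in %d steps' % (pos, path.starts[0], steps)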
nicememory/pie
pyglet/contrib/currently-broken/spryte/dtd/path.py
Python
apache-2.0
6,345
[ "VisIt" ]
cb22df3b7c2f2ee159eb21bd0e206dd31f2bfdd160a3c7bc567b5cae6386cdb4
# neigh_dict and nn_dict are the same thing. # Need to combine/change variables at some point # In[251]: from __future__ import division, absolute_import import astropy.stats import glob import math import matplotlib.pyplot as plt from matplotlib import ticker from matplotlib.ticker import FormatStrFormatter import numpy as np import os import pandas as pd from scipy import integrate,optimize,spatial # In[252]: __author__ =['Victor Calderon'] __copyright__ =["Copyright 2016 Victor Calderon, Index function"] __email__ =['victor.calderon@vanderbilt.edu'] __maintainer__ =['Victor Calderon'] def Index(directory, datatype): """ Indexes the files in a directory `directory' with a specific data type. Parameters ---------- directory: str Absolute path to the folder that is indexed. datatype: str Data type of the files to be indexed in the folder. Returns ------- file_array: array_like np.array of indexed files in the folder 'directory' with specific datatype. Examples -------- >>> Index('~/data', '.txt') >>> array(['A.txt', 'Z'.txt', ...]) """ assert(os.path.exists(directory)) files = np.array(glob.glob('{0}/*{1}'.format(directory, datatype))) return files # In[253]: def myceil(x, base=10): """ Returns the upper-bound integer of 'x' in base 'base'. Parameters ---------- x: float number to be approximated to closest number to 'base' base: float base used to calculate the closest 'largest' number Returns ------- n_high: float Closest float number to 'x', i.e. upper-bound float. Example ------- >>>> myceil(12,10) 20 >>>> >>>> myceil(12.05, 0.1) 12.10000 """ n_high = float(base*math.ceil(float(x)/base)) return n_high ############################################################################### def myfloor(x, base=10): """ Returns the lower-bound integer of 'x' in base 'base' Parameters ---------- x: float number to be approximated to closest number of 'base' base: float base used to calculate the closest 'smallest' number Returns ------- n_low: float Closest float number to 'x', i.e. lower-bound float. Example ------- >>>> myfloor(12, 5) >>>> 10 """ n_low = float(base*math.floor(float(x)/base)) return n_low ############################################################################### def Bins_array_create(arr, base=10): """ Generates array between [arr.min(), arr.max()] in steps of `base`. Parameters ---------- arr: array_like, Shape (N,...), One-dimensional Array of numerical elements base: float, optional (default=10) Interval between bins Returns ------- bins_arr: array_like Array of bin edges for given arr """ base = float(base) arr = np.array(arr) assert(arr.ndim==1) arr_min = myfloor(arr.min(), base=base) arr_max = myceil( arr.max(), base=base) bins_arr = np.arange(arr_min, arr_max+0.5*base, base) return bins_arr # In[254]: def sph_to_cart(ra,dec,cz): """ Converts spherical coordinates to Cartesian coordinates. Parameters ---------- ra: array-like right-ascension of galaxies in degrees dec: array-like declination of galaxies in degrees cz: array-like velocity of galaxies in km/s Returns ------- coords: array-like, shape = N by 3 x, y, and z coordinates """ cz_dist = cz/70. 
#converts velocity into distance x_arr = cz_dist*np.cos(np.radians(ra))*np.cos(np.radians(dec)) y_arr = cz_dist*np.sin(np.radians(ra))*np.cos(np.radians(dec)) z_arr = cz_dist*np.sin(np.radians(dec)) coords = np.column_stack((x_arr,y_arr,z_arr)) return coords ############################################################################ def calc_dens(n_val,r_val): """ Returns densities of spheres with radius being the distance to the nth nearest neighbor. Parameters ---------- n_val = integer The 'N' from Nth nearest neighbor r_val = array-like An array with the distances to the Nth nearest neighbor for each galaxy Returns ------- dens: array-like An array with the densities of the spheres created with radii to the Nth nearest neighbor. """ dens = np.array([(3.*(n_val+1)/(4.*np.pi*r_val[hh]**3))\ for hh in range(len(r_val))]) return dens # In[255]: def plot_calcs(mass,bins,dlogM): """ Returns values for plotting the stellar mass function and mass ratios Parameters ---------- mass: array-like A 1D array with mass values, assumed to be in order bins: array=like A 1D array with the values which will be used as the bin edges by the histogram function dlogM: float-like The log difference between bin edges Returns ------- bin_centers: array-like An array with the medians mass values of the mass bins mass-freq: array-like Contains the number density values of each mass bin ratio_dict: dictionary-like A dictionary with three keys, corresponding to the divisors 2,4, and 10 (as the percentile cuts are based on these divisions). Each key has the density-cut, mass ratios for that specific cut (50/50 for 2; 25/75 for 4; 10/90 for 10). """ mass_counts, edges = np.histogram(mass,bins) bin_centers = 0.5*(edges[:-1]+edges[1:]) mass_freq = mass_counts/float(len(mass))/dlogM # non_zero = (mass_freq!=0) ratio_dict = {} frac_val = [2,4,10] yerr = [] bin_centers_fin = [] for ii in frac_val: ratio_dict[ii] = {} frac_data = int(len(mass)/ii) # Calculations for the lower density cut frac_mass = mass[0:frac_data] counts, edges = np.histogram(frac_mass,bins) # Calculations for the higher density cut frac_mass_2 = mass[-frac_data:] counts_2, edges_2 = np.histogram(frac_mass_2,bins) # Ratio determination ratio_counts = (1.*counts_2)/(1.*counts) non_zero = np.isfinite(ratio_counts) ratio_counts_1 = ratio_counts[non_zero] # print 'len ratio_counts: {0}'.format(len(ratio_counts_1)) ratio_dict[ii] = ratio_counts_1 temp_yerr = (counts_2*1.)/(counts*1.)* \ np.sqrt(1./counts + 1./counts_2) temp_yerr_1 = temp_yerr[non_zero] # print 'len yerr: {0}'.format(len(temp_yerr_1)) yerr.append(temp_yerr_1) bin_centers_1 = bin_centers[non_zero] # print 'len bin_cens: {0}'.format(len(bin_centers_1)) bin_centers_fin.append(bin_centers_1) mass_freq_list = [[] for xx in xrange(2)] mass_freq_list[0] = mass_freq mass_freq_list[1] = np.sqrt(mass_counts)/float(len(mass))/dlogM mass_freq = np.array(mass_freq_list) ratio_dict_list = [[] for xx in range(2)] ratio_dict_list[0] = ratio_dict ratio_dict_list[1] = yerr ratio_dict = ratio_dict_list return bin_centers, mass_freq, ratio_dict, bin_centers_fin # In[366]: def bin_func(mass_dist,bins,kk,bootstrap=False): """ Returns median distance to Nth nearest neighbor Parameters ---------- mass_dist: array-like An array with mass values in at index 0 (when transformed) and distance to the Nth nearest neighbor in the others Example: 6239 by 7 Has mass values and distances to 6 Nth nearest neighbors bins: array=like A 1D array with the values which will be used as the bin edges kk: integer-like The index of 
mass_dist (transformed) where the appropriate distance array may be found Optional -------- bootstrap == True Calculates the bootstrap errors associated with each median distance value. Creates an array housing arrays containing the actual distance values associated with every galaxy in a specific bin. Bootstrap error is then performed using astropy, and upper and lower one sigma values are found for each median value. These are added to a list with the median distances, and then converted to an array and returned in place of just 'medians.' Returns ------- medians: array-like An array with the median distance to the Nth nearest neighbor from all the galaxies in each of the bins """ edges = bins bin_centers = 0.5*(edges[:-1]+edges[1:]) # print 'length bins:' # print len(bins) digitized = np.digitize(mass_dist.T[0],edges) digitized -= int(1) bin_nums = np.unique(digitized) bin_nums_list = list(bin_nums) if (len(bin_centers)) in bin_nums_list: bin_nums_list.remove(len(bin_centers)) bin_nums = np.array(bin_nums_list) # print bin_nums non_zero_bins = [] for ii in bin_nums: if (len(mass_dist.T[kk][digitized==ii]) != 0): non_zero_bins.append(bin_centers[ii]) # print len(non_zero_bins) for ii in bin_nums: if len(mass_dist.T[kk][digitized==ii]) == 0: # temp_list = list(mass_dist.T[kk]\ # [digitized==ii]) # temp_list.append(np.nan) mass_dist.T[kk][digitized==ii] = np.nan # print bin_nums # print len(bin_nums) medians = np.array([np.nanmedian(mass_dist.T[kk][digitized==ii])\ for ii in bin_nums]) # print len(medians) if bootstrap == True: dist_in_bin = np.array([(mass_dist.T[kk][digitized==ii])\ for ii in bin_nums]) for vv in range(len(dist_in_bin)): if len(dist_in_bin[vv]) == 0: # dist_in_bin_list = list(dist_in_bin[vv]) # dist_in_bin[vv] = np.zeros(len(dist_in_bin[0])) dist_in_bin[vv] = np.nan low_err_test = np.array([np.percentile(astropy.stats.bootstrap\ (dist_in_bin[vv],bootnum=1000,bootfunc=np.median),16)\ for vv in range(len(dist_in_bin))]) high_err_test = np.array([np.percentile(astropy.stats.bootstrap\ (dist_in_bin[vv],bootnum=1000,bootfunc=np.median),84)\ for vv in range(len(dist_in_bin))]) med_list = [[] for yy in range(3)] med_list[0] = medians med_list[1] = low_err_test med_list[2] = high_err_test medians = np.array(med_list) # print len(medians) # print len(non_zero_bins) return medians, np.array(non_zero_bins) # In[257]: def hist_calcs(mass,bins,dlogM): """ Returns dictionaries with the counts for the upper and lower density portions; calculates the three different percentile cuts for each mass array given Parameters ---------- mass: array-like A 1D array with log stellar mass values, assumed to be an order which corresponds to the ascending densities; (necessary, as the index cuts are based on this) bins: array-like A 1D array with the values which will be used as the bin edges dlogM: float-like The log difference between bin edges Returns ------- hist_dict_low: dictionary-like A dictionary with three keys (the frac vals), with arrays as values. The values for the lower density cut hist_dict_high: dictionary like A dictionary with three keys (the frac vals), with arrays as values. 
The values for the higher density cut """ hist_dict_low = {} hist_dict_high = {} bin_cens_low = {} bin_cens_high = {} frac_val = np.array([2,4,10]) frac_dict = {2:0,4:1,10:2} edges = bins bin_centers = 0.5 * (edges[:-1]+edges[1:]) low_err = [[] for xx in xrange(len(frac_val))] high_err = [[] for xx in xrange(len(frac_val))] for ii in frac_val: # hist_dict_low[ii] = {} # hist_dict_high[ii] = {} frac_data = int(len(mass)/ii) frac_mass = mass[0:frac_data] counts, edges = np.histogram(frac_mass,bins) low_counts = (counts/float(len(frac_mass))/dlogM) non_zero = (low_counts!=0) low_counts_1 = low_counts[non_zero] hist_dict_low[ii] = low_counts_1 bin_cens_low[ii] = bin_centers[non_zero] ##So... I don't actually know if I need to be calculating error ##on the mocks. I thought I didn't, but then, I swear someone ##*ahem (Victor)* said to. So I am. Guess I'm not sure they're ##useful. But I'll have them if necessary. And ECO at least ##needs them. low_err = np.sqrt(counts)/len(frac_mass)/dlogM low_err_1 = low_err[non_zero] err_key = 'err_{0}'.format(ii) hist_dict_low[err_key] = low_err_1 frac_mass_2 = mass[-frac_data:] counts_2, edges_2 = np.histogram(frac_mass_2,bins) high_counts = (counts_2/float(len(frac_mass_2))/dlogM) non_zero = (high_counts!=0) high_counts_1 = high_counts[non_zero] hist_dict_high[ii] = high_counts_1 bin_cens_high[ii] = bin_centers[non_zero] high_err = np.sqrt(counts_2)/len(frac_mass_2)/dlogM high_err_1 = high_err[non_zero] hist_dict_high[err_key] = high_err_1 return hist_dict_low, hist_dict_high, bin_cens_low, bin_cens_high # In[258]: def mean_bin_mass(mass_dist,bins,kk): """ Returns mean mass of galaxies in each bin Parameters ---------- mass_dist: array-like An array with mass values in at index 0 (when transformed) bins: array=like A 1D array with the values which will be used as the bin edges Returns ------- """ edges = bins digitized = np.digitize(mass_dist.T[0],edges) digitized -= int(1) bin_nums = np.unique(digitized) for ii in bin_nums: if len(mass_dist.T[kk][digitized==ii]) == 0: mass_dist.T[kk][digitized==ii] = np.nan mean_mass = np.array([np.nanmean(mass_dist.T[0][digitized==ii])\ for ii in bin_nums]) return mean_mass # In[259]: # dirpath = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density" # dirpath += r"\Catalogs\Resolve_plk_5001_so_mvir_scatter_ECO_Mocks_" # dirpath += r"scatter_mocks\Resolve_plk_5001_so_mvir_scatter0p1_ECO_Mocks" dirpath = r"C:\Users\Hannah\Desktop\Vanderbilt_REU" dirpath += r"\Stellar_mass_env_Density\Catalogs" dirpath += r"\Mocks_Scatter_Abundance_Matching" dirpath += r"\Resolve_plk_5001_so_mvir_scatter0p1_ECO_Mocks" figsave_path = r"C:\Users\Hannah\Desktop\Vanderbilt_REU" figsave_path+= r"\Stellar_mass_env_Density\Plots" figsave_path+= r"\Abundance_matched" figsave_path+= r"\one_dec" usecols = (0,1,4,7,8,13) dlogM = 0.2 neigh_dict = {1:0,2:1,3:2,5:3,10:4,20:5} # In[260]: ECO_cats = (Index(dirpath,'.dat')) names = ['ra','dec','Halo_ID','cen_sat_flag','cz','logMstar'] PD = [[] for ii in range(len(ECO_cats))] for ii in range(len(ECO_cats)): temp_PD = (pd.read_csv(ECO_cats[ii],sep="\s+", usecols= usecols,\ header=None,skiprows=2,names=names)) col_list = list(temp_PD) col_list[2], col_list[3], col_list[4] = \ col_list[3], col_list[4], col_list[2] temp_PD.ix[:,col_list] PD[ii] = temp_PD PD_comp_1 = [(PD[ii][PD[ii].logMstar >= 9.1]) for ii in range(len(ECO_cats))] PD_comp_2 = [(PD_comp_1[ii][PD_comp_1[ii].logMstar <=11.77]) \ for ii in range(len(ECO_cats))] PD_comp = [(PD_comp_2[ii][PD_comp_2[ii].cen_sat_flag == 1]) \ for ii in 
range(len(ECO_cats))] [(PD_comp[ii].reset_index(drop=True,inplace=True)) \ for ii in range(len(ECO_cats))] min_max_mass_arr = [] for ii in range(len(PD_comp)): min_max_mass_arr.append(max(PD_comp[ii].logMstar)) min_max_mass_arr.append(min(PD_comp[ii].logMstar)) min_max_mass_arr = np.array(min_max_mass_arr) bins = Bins_array_create(min_max_mass_arr,dlogM) bins+= 0.1 bins_list = list(bins) for ii in bins: if ii > 11.77: bins_list.remove(ii) bins = np.array(bins_list) bin_centers = 0.5 * (bins[:-1]+bins[1:]) num_of_bins = int(len(bins) - 1) ra_arr = np.array([(PD_comp[ii].ra) for ii in range(len(PD_comp))]) dec_arr = np.array([(PD_comp[ii].dec) for ii in range(len(PD_comp))]) cz_arr = np.array([(PD_comp[ii].cz) for ii in range(len(PD_comp))]) mass_arr = np.array([(PD_comp[ii].logMstar) for ii in range(len(PD_comp))]) halo_id_arr = np.array([(PD_comp[ii].Halo_ID) for ii in range(len(PD_comp))]) coords_test = np.array([sph_to_cart(ra_arr[vv],dec_arr[vv],cz_arr[vv])\ for vv in range(len(ECO_cats))]) neigh_vals = np.array([1,2,3,5,10,20]) nn_arr_temp = [[] for uu in xrange(len(coords_test))] nn_arr = [[] for xx in xrange(len(coords_test))] nn_arr_nn = [[] for yy in xrange(len(neigh_vals))] nn_idx = [[] for zz in xrange(len(coords_test))] for vv in range(len(coords_test)): nn_arr_temp[vv] = spatial.cKDTree(coords_test[vv]) nn_arr[vv] = np.array(nn_arr_temp[vv].query(coords_test[vv],21)[0]) nn_idx[vv] = np.array(nn_arr_temp[vv].query(coords_test[vv],21)[1]) nn_specs = [(np.array(nn_arr).T[ii].T[neigh_vals].T) for ii in\ range(len(coords_test))] nn_mass_dist = np.array([(np.column_stack((mass_arr[qq],nn_specs[qq])))\ for qq in range(len(coords_test))]) nn_neigh_idx = np.array([(np.array(nn_idx).T[ii].T[neigh_vals].T) \ for ii in range(len(coords_test))]) truth_vals = {} for ii in range(len(halo_id_arr)): truth_vals[ii] = {} for jj in neigh_vals: halo_id_neigh = halo_id_arr[ii][nn_neigh_idx[ii].T[neigh_dict[jj]]].values truth_vals[ii][jj] = halo_id_neigh==halo_id_arr[ii].values # In[265]: halo_frac = {} for ii in range(len(mass_arr)): halo_frac[ii] = {} mass_binning = np.digitize(mass_arr[ii],bins) bins_to_use = list(np.unique(mass_binning)) if (len(bins)-1) not in bins_to_use: bins_to_use.append(len(bins)-1) if len(bins) in bins_to_use: bins_to_use.remove(len(bins)) for jj in neigh_vals: one_zero = truth_vals[ii][jj].astype(int) frac = [] for xx in bins_to_use: truth_binning = one_zero[mass_binning==xx] num_in_bin = len(truth_binning) if num_in_bin == 0: num_in_bin = np.nan num_same_halo = np.count_nonzero(truth_binning==1) frac.append(num_same_halo/(1.*num_in_bin)) halo_frac[ii][jj] = frac # In[266]: nn_dict = {1:0,2:1,3:2,5:3,10:4,20:5} mean_mock_halo_frac = {} for ii in neigh_vals: for jj in range(len(halo_frac)): bin_str = '{0}'.format(ii) oo_arr = halo_frac[jj][ii] n_o_elem = len(oo_arr) if jj == 0: oo_tot = np.zeros((n_o_elem,1)) oo_tot = np.insert(oo_tot,len(oo_tot.T),oo_arr,1) oo_tot = np.array(np.delete(oo_tot,0,axis=1)) oo_tot_mean = [np.nanmean(oo_tot[uu]) for uu in xrange(len(oo_tot))] oo_tot_std = [np.nanstd(oo_tot[uu])/np.sqrt(len(halo_frac)) \ for uu in xrange(len(oo_tot))] mean_mock_halo_frac[bin_str] = [oo_tot_mean,oo_tot_std] def plot_halo_frac(bin_centers,y_vals,ax,plot_idx,text=False): titles = [1,2,3,5,10,20] ax.set_xlim(9.1,11.9) ax.set_xticks(np.arange(9.5,12.,0.5)) ax.tick_params(axis='x', which='major', labelsize=16) if text == True: title_here = 'n = {0}'.format(titles[plot_idx]) ax.text(0.05, 0.95, title_here,horizontalalignment='left', \ 
verticalalignment='top',transform=ax.transAxes,fontsize=18) if plot_idx == 4: ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=20) ax.plot(bin_centers,y_vals,color='silver') def plot_mean_halo_frac(bin_centers,mean_vals,ax,std): ax.errorbar(bin_centers,mean_vals,yerr=std,color='maroon') # In[56]: nrow = int(2) ncol = int(3) fig,axes = plt.subplots(nrows=nrow,ncols=ncol, \ figsize=(100,200),sharex=True) axes_flat = axes.flatten() zz = int(0) while zz <=4: for jj in neigh_vals: for kk in range(len(halo_frac)): if kk == 0: value = True else: value = False plot_halo_frac(bin_centers,halo_frac[kk][jj],axes_flat[zz],zz,\ text = value) nn_str = '{0}'.format(jj) plot_mean_halo_frac(bin_centers,mean_mock_halo_frac[nn_str][0],\ axes_flat[zz],mean_mock_halo_frac[nn_str][1]) # save_means = open("halo_frac_means.txt", "a") # save_means.write\ # (("{0} + \n + 'nn_val' + {1} + 'mean' {2} \n + 'error' {3}")\ # .format(dirpath,jj,mean_mock_halo_frac[nn_str][0],\ # mean_mock_halo_frac[nn_str][1])) # save_means.close() zz += 1 plt.subplots_adjust(top=0.97,bottom=0.1,left=0.03,right=0.99,hspace=0.10,\ wspace=0.12) # plt.savefig(figsave_path + r"\halo_frac_means") plt.show() # In[342]: # nn_dist = {} nn_dens = {} mass_dat = {} ratio_info = {} bin_cens_diff = {} mass_freq = [[] for xx in xrange(len(coords_test))] for ii in range(len(coords_test)): # nn_dist[ii] = {} nn_dens[ii] = {} mass_dat[ii] = {} ratio_info[ii] = {} bin_cens_diff[ii] = {} # nn_dist[ii]['mass'] = nn_mass_dist[ii].T[0] for jj in range(len(neigh_vals)): # nn_dist[ii][(neigh_vals[jj])] = np.array(nn_mass_dist[ii].T\ # [range(1,len(neigh_vals)+1)[jj]]) nn_dens[ii][(neigh_vals[jj])] = np.column_stack((nn_mass_dist[ii].T\ [0],calc_dens(neigh_vals[jj],\ nn_mass_dist[ii].T[range(1,len(neigh_vals)+1)[jj]]))) idx = np.array([nn_dens[ii][neigh_vals[jj]].T[1].argsort()]) mass_dat[ii][(neigh_vals[jj])] = (nn_dens[ii][neigh_vals[jj]]\ [idx].T[0]) bin_centers, mass_freq[ii], ratio_info[ii][neigh_vals[jj]],\ bin_cens_diff[ii][neigh_vals[jj]] = \ plot_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM) all_mock_meds = {} mock_meds_bins = {} all_mock_mass_means = {} for vv in range(len(nn_mass_dist)): all_mock_meds[vv] = {} mock_meds_bins[vv]= {} all_mock_mass_means[vv] = {} for jj in range(len(nn_mass_dist[vv].T)-1): all_mock_meds[vv][neigh_vals[jj]],mock_meds_bins[vv][neigh_vals[jj]]\ = (bin_func(nn_mass_dist[vv],bins,(jj+1))) all_mock_mass_means[vv][neigh_vals[jj]] =\ (mean_bin_mass(nn_mass_dist[vv],bins,(jj+1))) # In[358]: med_plot_arr = {} for ii in range(len(neigh_vals)): med_plot_arr[neigh_vals[ii]] = {} for jj in range(len(nn_mass_dist)): med_plot_arr[neigh_vals[ii]][jj] = all_mock_meds[jj][neigh_vals[ii]] # for ii in range(len(neigh_vals)): # for jj in range(len(nn_mass_dist)): # print len(all_mock_meds[jj][ii]) mass_freq_plot = (np.array(mass_freq)) max_lim = [[] for xx in range(len(mass_freq_plot.T))] min_lim = [[] for xx in range(len(mass_freq_plot.T))] for jj in range(len(mass_freq_plot.T)): max_lim[jj] = max(mass_freq_plot.T[jj][0]) min_lim[jj] = min(mass_freq_plot.T[jj][0]) global bins_curve_fit bins_curve_fit = bins.copy() # global bins_curve_fit # In[281]: eco_path = r"C:\Users\Hannah\Desktop\Vanderbilt_REU\Stellar_mass_env_density" eco_path += r"\Catalogs\ECO_true" eco_cols = np.array([0,1,2,4]) # In[282]: ECO_true = (Index(eco_path,'.txt')) names = ['ra','dec','cz','logMstar'] PD_eco = pd.read_csv(ECO_true[0],sep="\s+", usecols=(eco_cols),header=None, \ skiprows=1,names=names) eco_comp = PD_eco[PD_eco.logMstar >= 9.1] ra_eco = 
(np.array(eco_comp)).T[0] dec_eco = (np.array(eco_comp)).T[1] cz_eco = (np.array(eco_comp)).T[2] mass_eco = (np.array(eco_comp)).T[3] coords_eco = sph_to_cart(ra_eco,dec_eco,cz_eco) eco_neighbor_tree = spatial.cKDTree(coords_eco) eco_tree_dist = np.array(eco_neighbor_tree.query(coords_eco, \ (neigh_vals[-1]+1))[0]) eco_mass_dist = np.column_stack((mass_eco,eco_tree_dist.T[neigh_vals].T)) ##range 1,7 because of the six nearest neighbors (and fact that 0 is mass) ##the jj is there to specify which index in the [1,6] array eco_dens = ([calc_dens(neigh_vals[jj], (eco_mass_dist.T[range(1,7)\ [jj]])) for jj in range (len(neigh_vals))]) eco_mass_dens = [(np.column_stack((mass_eco,eco_dens[ii]))) for ii in\ range(len(neigh_vals))] eco_idx = [(eco_mass_dens[jj].T[1].argsort()) for jj in \ range(len(neigh_vals))] eco_mass_dat = [(eco_mass_dens[jj][eco_idx[jj]].T[0]) for jj in\ range(len(neigh_vals))] eco_ratio_info = [[] for xx in xrange(len(eco_mass_dat))] eco_final_bins = [[] for xx in xrange(len(eco_mass_dat))] for qq in range(len(eco_mass_dat)): bin_centers, eco_freq, eco_ratio_info[qq],eco_final_bins[qq] = \ plot_calcs(eco_mass_dat[qq],bins,dlogM) eco_medians = [[] for xx in xrange(len(eco_mass_dat))] eco_med_bins = [[] for xx in xrange(len(eco_mass_dat))] eco_mass_means = [[] for xx in xrange(len(eco_mass_dat))] for jj in (range(len(eco_mass_dat))): eco_medians[jj],eco_med_bins[jj] = np.array(bin_func(eco_mass_dist,\ bins,(jj+1),bootstrap=True)) eco_mass_means[jj] = (mean_bin_mass(eco_mass_dist,bins,(jj+1))) # In[283]: hist_low_info = {} hist_high_info = {} hist_low_bins = {} hist_high_bins = {} for ii in xrange(len(coords_test)): hist_low_info[ii] = {} hist_high_info[ii] = {} hist_low_bins[ii] = {} hist_high_bins[ii] = {} for jj in range(len(neigh_vals)): hist_low_info[ii][neigh_vals[jj]],\ hist_high_info[ii][neigh_vals[jj]],\ hist_low_bins[ii][neigh_vals[jj]],\ hist_high_bins[ii][neigh_vals[jj]]\ = hist_calcs(mass_dat[ii][neigh_vals[jj]],bins,dlogM) frac_vals = [2,4,10] hist_low_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in\ xrange(len(neigh_vals))] hist_high_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in\ xrange(len(neigh_vals))] # In[284]: hist_low_info[0][1] # In[285]: for ii in range(len(neigh_vals)): for jj in range(len(nn_mass_dist)): hist_low_arr[ii][jj] = (hist_low_info[jj][neigh_vals[ii]]) hist_high_arr[ii][jj] = (hist_high_info[jj][neigh_vals[ii]]) ##I unindented the below two "lines". 
Because they don't \ # seem to need to be called iteratively plot_low_hist = [[[[] for yy in xrange(len(nn_mass_dist))]\ for zz in xrange(len(frac_vals))] for xx in\ xrange(len(hist_low_arr))] plot_high_hist = [[[[] for yy in xrange(len(nn_mass_dist))]\ for zz in xrange(len(frac_vals))] for xx in\ xrange(len(hist_high_arr))] for jj in range(len(nn_mass_dist)): for hh in range(len(frac_vals)): for ii in range(len(neigh_vals)): plot_low_hist[ii][hh][jj] = hist_low_arr[ii][jj][frac_vals[hh]] plot_high_hist[ii][hh][jj] = hist_high_arr[ii][jj][frac_vals[hh]] # In[286]: eco_mass_means # In[287]: eco_low = {} eco_high = {} eco_low_bins = {} eco_high_bins = {} for jj in range(len(neigh_vals)): eco_low[neigh_vals[jj]] = {} eco_high[neigh_vals[jj]] = {} eco_low_bins[neigh_vals[jj]] = {} eco_high_bins[neigh_vals[jj]] = {} eco_low[neigh_vals[jj]], eco_high[neigh_vals[jj]],\ eco_low_bins[neigh_vals[jj]], eco_high_bins[neigh_vals[jj]]=\ hist_calcs(eco_mass_dat[jj],bins,dlogM) # In[288]: eco_low[1] # In[289]: def perc_calcs(mass,bins,dlogM): mass_counts, edges = np.histogram(mass,bins) mass_freq = mass_counts/float(len(mass))/dlogM bin_centers = 0.5*(edges[:-1]+edges[1:]) non_zero = (mass_freq!=0) mass_freq_1 = mass_freq[non_zero] smf_err = np.sqrt(mass_counts)/float(len(mass))/dlogM smf_err_1 = smf_err[non_zero] bin_centers_1 = bin_centers[non_zero] return mass_freq_1, smf_err_1, bin_centers_1 # In[290]: def quartiles(mass): dec_val = int(len(mass)/4) res_list = [[] for bb in range(4)] for aa in range(0,4): if aa == 3: res_list[aa] = mass[aa*dec_val:] else: res_list[aa] = mass[aa*dec_val:(aa+1)*dec_val] return res_list # In[291]: def deciles(mass): dec_val = int(len(mass)/10) res_list = [[] for bb in range(10)] for aa in range(0,10): if aa == 9: res_list[aa] = mass[aa*dec_val:] else: res_list[aa] = mass[aa*dec_val:(aa+1)*dec_val] return res_list # In[412]: def mean_perc_mass(mass,bins): """ Returns mean mass of galaxies in each bin Parameters ---------- mass_dist: array-like An array with mass values in at index 0 (when transformed) bins: array=like A 1D array with the values which will be used as the bin edges Returns ------- """ edges = bins digitized = np.digitize(mass,edges) digitized -= int(1) bin_nums = np.unique(digitized) for ii in bin_nums: if len(mass[digitized==ii]) == 0: mass[digitized==ii] = np.nan mean_mass = np.array([np.nanmean(mass[digitized==ii])\ for ii in bin_nums]) return mean_mass eco_dec = {} for cc in range(len(eco_mass_dat)): eco_dec[neigh_vals[cc]] = deciles(eco_mass_dat[cc]) eco_dec_smf = {} eco_dec_err = {} eco_dec_bin = {} for ss in neigh_vals: eco_dec_smf[ss] = {} eco_dec_err[ss] = {} eco_dec_bin[ss] = {} for tt in range(len(eco_dec[ss])): eco_dec_smf[ss][tt], eco_dec_err[ss][tt], eco_dec_bin[ss][tt] = \ perc_calcs(eco_dec[ss][tt],bins,dlogM) # # Stellar Mass Function # In[294]: fig,ax = plt.subplots(figsize=(8,8)) ax.set_title('Mass Distribution',fontsize=18) ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18) ax.set_ylabel(r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$',fontsize=20) ax.set_yscale('log') ax.set_xlim(9.1,11.9) ax.tick_params(axis='both', labelsize=14) for ii in range(len(mass_freq)): ax.plot(bin_centers,mass_freq[ii][0],color='silver') ax.fill_between(bin_centers,max_lim,min_lim,color='silver',alpha=0.1) ax.errorbar(bin_centers,eco_freq[0],yerr=eco_freq[1],color='maroon',\ linewidth=2,label='ECO') ax.legend(loc='best') plt.subplots_adjust(left=0.15, bottom=0.1, right=0.85, top=0.94,\ hspace=0.2,wspace=0.2) # plt.savefig(figsave_path + 
r"\stellar_mass_func") plt.show() # # The Mess I am unlovingly referring to as Schechter Functions # In[432]: def schechter_real_func(mean_of_mass_bin,phi_star,alpha,Mstar): """ mean_of_mass_bin: array-like Unlogged x-values phi-star: float-like Normalization value alpha: float-like Low-mass end slope Mstar: float-like Unlogged value where function switches from power-law to exponential """ # M_over_mstar = (10**mean_of_mass_bin)/Mstar M_over_mstar = (mean_of_mass_bin)/Mstar res_arr = (phi_star) * (M_over_mstar**(alpha)) *\ np.exp(- M_over_mstar) return res_arr # In[40]: def schechter_log_func(stellar_mass,phi_star,alpha,m_star): """ Returns a plottable Schechter function for the stellar mass functions of galaxies Parameters ---------- stellar_mass: array-like An array of unlogged stellar mass values which will eventually be the x-axis values the function is plotted against phi_star: float-like A constant which normalizes (?) the function; Moves the graph up and down alpha: negative integer-like The faint-end, or in this case, low-mass slope; Describes the power-law portion of the curve m_star: float-like Unlogged value of the characteristic (?) stellar mass; the "knee" of the function, where the power-law gives way to the exponential portion Returns ------- res: array-like Array of values to be plotted on a log scale to display the Schechter function """ constant = np.log(10) * phi_star log_M_Mstar = np.log10(stellar_mass/m_star) res = constant * 10**(log_M_Mstar * (alpha+1)) *\ np.exp(-10**log_M_Mstar) return res # In[41]: def schech_integral(edge_1,edge_2,phi_star,alpha,Mstar): bin_integral = (integrate.quad(schechter_real_func,edge_1,edge_2,\ args=(phi_star,alpha,Mstar))[0]) # tot_integral = (integrate.quad(schechter_real_func,9.1,11.7,\ # args=(phi_star,alpha,Mstar)))[0] # # # result = bin_integral/tot_integral/0.2 return bin_integral def schech_step_3(xdata,phi_star,alpha,Mstar): """ xdata: array-like Unlogged x-values Mstar: unlogged """ test_int = [] for ii in range(len(xdata)): test_int.append((schech_integral(10**bins_curve_fit[ii],\ 10**bins_curve_fit[ii+1],phi_star,alpha,Mstar))) return test_int # In[44]: def find_params(bin_int,mean_mass,count_err): """ Parameters ---------- bin_int: array-like Integral (number of counts) in each bin of width dlogM mean_mass: array-like Logged values (?) Returns ------- opt_v: array-like Array with three values: phi_star, alpha, and M_star res_arr: array-like Array with two values: alpha and log_M_star """ xdata = 10**mean_mass # xdata = mean_mass p0 = (1.5,-1.05,10**10.64) opt_v,est_cov = optimize.curve_fit(schech_step_3,xdata,\ bin_int,p0=p0,sigma=count_err,check_finite=True) alpha = opt_v[1] log_m_star = np.log10(opt_v[2]) res_arr = np.array([alpha,log_m_star]) perr = np.sqrt(np.diag(est_cov)) return opt_v, res_arr, perr, est_cov # fig, ax = plt.subplots() # ax.set_yscale('log') # ax.set_xscale('log') # # ax.plot(eco_mass_means[0][:-3],test) # ax.plot(10**bin_centers,schech_vals_graph) # ax.plot(10**eco_mass_means[0][:-3],(eco_dec_smf[1][0])) # plt.show() # # Regular Plotting Reintroduced # In[104]: def plot_all_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx,text=False): """ Returns a plot showing the density-cut, mass ratio. 
Optimally used with a well-initiated for-loop Parameters ---------- bin_centers: array-like An array with the medians mass values of the mass bins y_vals: array-like An array containing the ratio values for each mass bin neigh_val: integer-like Value which will be inserted into the text label of each plot ax: axis-like A value which specifies which axis each subplot is to be plotted to col_num: integer-like Integer which specifies which column is currently being plotted. Used for labelling subplots plot_idx: integer-like Specifies which subplot the figure is plotted to. Used for labeling the x-axis Returns ------- Figure with three subplots showing appropriate ratios """ if plot_idx ==16: ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18) if text == True: if col_num ==0: title_label = 'Mass Ratio 50/50, {0} NN'.format(neigh_val) frac_val = 10 ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,\ fontsize=12) elif col_num ==1: title_label = 'Mass Ratio 25/75, {0} NN'.format(neigh_val) frac_val = 4 ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,\ fontsize=12) elif col_num ==2: title_label = 'Mass Ratio 10/90, {0} NN'.format(neigh_val) frac_val = 2 ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,\ fontsize=12) ax.set_xlim(9.1,11.9) # ax.set_ylim([0,5]) ax.set_ylim(0,7) ax.set_xticks(np.arange(9.5, 12., 0.5)) ax.set_yticks([1.,3.]) ax.tick_params(axis='both', labelsize=12) ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0) ax.plot(bin_centers,y_vals,color='silver') # In[103]: def plot_eco_rats(bin_centers,y_vals,neigh_val,ax,col_num,plot_idx,only=False): """ Returns subplots of ECO density-cut,mass ratios Parameters ---------- bin_centers: array-like An array with the medians mass values of the mass bins y_vals: array-like An array containing the ratio values for each mass bin neigh_val: integer-like Value which will be inserted into the text label of each plot ax: axis-like A value which specifies which axis each subplot is to be plotted to col_num: integer-like Integer which specifies which column is currently being plotted. Used for labelling subplots plot_idx: integer-like Specifies which subplot the figure is plotted to. Used for labeling the x-axis Optional -------- only == True To be used when only plotting the ECO ratios, no mocks. 
Will add in the additional plotting specifications that would have been taken care of previously in a for-loop which plotted the mocks as well Returns ------- ECO ratios plotted to any previously initialized figure """ if only == True: if plot_idx ==16: ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18) if col_num ==0: title_label = 'Mass Ratio 50/50, {0} NN'.format(neigh_val) frac_val = 10 ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=12) elif col_num ==1: title_label = 'Mass Ratio 25/75, {0} NN'.format(neigh_val) frac_val = 4 ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=12) elif col_num ==2: title_label = 'Mass Ratio 10/90, {0} NN'.format(neigh_val) frac_val = 2 ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=12) ax.set_xlim(9.1,11.9) ax.set_ylim(0,7) # ax.set_ylim([0,5]) ax.set_xticks(np.arange(9.5, 12., 0.5)) ax.set_yticks([1.,3.]) ax.tick_params(axis='both', labelsize=12) ax.axhline(y=1,c="darkorchid",linewidth=0.5,zorder=0) frac_vals = np.array([2,4,10]) y_vals_2 = y_vals[0][frac_vals[hh]] ax.errorbar(bin_centers,y_vals_2,yerr=y_vals[1][hh],\ color='maroon',linewidth=2) # In[83]: frac_vals = [2,4,10] nn_plot_arr = [[[] for yy in xrange(len(nn_mass_dist))] for xx in\ xrange(len(neigh_vals))] for ii in range(len(neigh_vals)): for jj in range(len(nn_mass_dist)): nn_plot_arr[ii][jj] = (ratio_info[jj][neigh_vals[ii]]) plot_frac_arr = [[[[] for yy in xrange(len(nn_mass_dist))]\ for zz in xrange(len(frac_vals))] for xx in\ xrange(len(nn_plot_arr))] frac_err_arr = [[[[] for yy in xrange(len(nn_mass_dist))]\ for zz in xrange(len(frac_vals))] for xx in\ xrange(len(nn_plot_arr))] for jj in range(len(nn_mass_dist)): for hh in range(len(frac_vals)): for ii in range(len(neigh_vals)): plot_frac_arr[ii][hh][jj] = nn_plot_arr[ii][jj][0][frac_vals[hh]] frac_err_arr[ii][hh][jj] = nn_plot_arr[ii][jj][1][hh] # In[105]: np.seterr(divide='ignore',invalid='ignore') nrow_num = int(6) ncol_num = int(3) zz = int(0) fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num,\ figsize=(100,200), sharex= True,sharey=True) axes_flat = axes.flatten() fig.text(0.01, 0.5, 'High Density Counts/Lower Density Counts', ha='center',\ va='center',rotation='vertical',fontsize=20) # fig.suptitle("Percentile Trends", fontsize=18) while zz <= 16: for ii in range(len(eco_ratio_info)): for hh in range(len(eco_ratio_info[0][1])): for jj in range(len(nn_mass_dist)): if jj == 0: value = True else: value = False plot_all_rats(bin_cens_diff[jj][neigh_vals[ii]][hh],\ (plot_frac_arr[ii][hh][jj]),\ neigh_vals[ii],axes_flat[zz],hh,zz,text=value) plot_eco_rats(eco_final_bins[ii][hh],(eco_ratio_info[ii]),\ neigh_vals[ii],axes_flat[zz],hh,zz) zz += 1 plt.subplots_adjust(left=0.04, bottom=0.09, right=0.98, top=0.98,\ hspace=0,wspace=0) # plt.savefig(figsave_path + r"\ratios") plt.show() # In[145]: def plot_hists(bins_high,bins_low,high_counts,low_counts,\ neigh_val,ax,col_num,plot_idx,text=False): """ Returns a plot showing the density-cut, mass counts. 
Parameters ---------- mass: array-like A 1D array with log stellar mass values neigh_val: integer-like Value which will be inserted into the text label of each plot bins: array-like A 1D array with the values which will be used as the bin edges dlogM: float-like The log difference between bin edges ax: axis-like A value which specifies which axis each subplot is to be plotted to col_num: integer-like Integer which specifies which column is currently being plotted. Used for labelling subplots plot_idx: integer-like Specifies which subplot the figure is plotted to. Used for labeling the x-axis Returns ------- Figure with two curves, optionally (if uncommented) plotted in step """ ax.set_yscale('log') if text == True: if col_num==0: title_label = 'Mass 50/50, {0} NN'.format(neigh_val) ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=12) elif col_num==1: title_label = 'Mass 25/75, {0} NN'.format(neigh_val) ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=12) elif col_num==2: title_label = 'Mass 10/90, {0} NN'.format(neigh_val) ax.text(0.05, 0.95, title_label,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=12) if plot_idx == 16: ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18) ax.set_xlim(9.1,11.9) ax.set_ylim([10**-3,10**1]) ax.set_xticks(np.arange(9.5, 12., 0.5)) ax.set_yticks([10**-2,10**0]) ax.plot(bins_high,high_counts,color = 'lightslategrey',alpha=0.2) ax.plot(bins_low,low_counts,color = 'lightslategray',alpha=0.2) def plot_eco_hists(bins_high,bins_low,high_counts,low_counts, \ frac_val,ax,plot_idx): err_key = 'err_{0}'.format(frac_val) ax.errorbar(bins_high[frac_val],high_counts[frac_val],\ yerr=high_counts[err_key],drawstyle='steps-mid',\ color='royalblue',label='Higher Density') ax.errorbar(bins_low[frac_val],low_counts[frac_val],\ yerr=low_counts[err_key],drawstyle='steps-mid',\ color='crimson',label='Lower Density') if plot_idx == 0: ax.legend(loc='best') # In[147]: nrow_num = int(6) ncol_num = int(3) frac_dict = {2:0,4:1,10:2} fig, axes = plt.subplots(nrows=nrow_num, ncols=ncol_num,\ figsize=(150,200), sharex= True,sharey=True) axes_flat = axes.flatten() fig.text(0.02, 0.5,r'$\log\ (\frac{N_{gal}}{N_{total}*dlogM_{*}})$', \ ha='center',va='center',rotation='vertical',fontsize=20) for ii in range(len(mass_dat)): zz = 0 for jj in range(len(neigh_vals)): for hh in frac_vals: if ii == 0: value = True else: value = False plot_hists(hist_high_bins[ii][neigh_vals[jj]][hh],\ hist_low_bins[ii][neigh_vals[jj]][hh],\ hist_high_info[ii][neigh_vals[jj]][hh],\ hist_low_info[ii][neigh_vals[jj]][hh],\ neigh_vals[jj],axes_flat[zz],frac_dict[hh],zz,\ text=value) if ii == 0: plot_eco_hists(eco_high_bins[neigh_vals[jj]],\ eco_low_bins[neigh_vals[jj]],eco_high[neigh_vals[jj]],\ eco_low[neigh_vals[jj]],hh,axes_flat[zz],zz) zz += int(1) plt.subplots_adjust(left=0.07, bottom=0.09, right=0.98, top=0.98,\ hspace=0, wspace=0) # plt.savefig(figsave_path + r"\histograms") plt.show() # In[372]: def plot_all_meds(bin_centers,y_vals,ax,plot_idx,text=False): """ Returns six subplots showing the median distance to the Nth nearest neighbor for each mass bin. Assumes a previously defined figure. 
Best used in a for-loop Parameters ---------- bin_centers: array-like An array with the medians mass values of the mass bins y_vals: array-like An array containing the median distance values for each mass bin ax: axis-like A value which specifies which axis each subplot is to be plotted to plot_idx: integer-like Specifies which subplot the figure is plotted to. Used for the text label in each subplot Returns ------- Subplots displaying the median distance to Nth nearest neighbor trends for each mass bin """ titles = [1,2,3,5,10,20] ax.set_ylim(0,10**1.5) ax.set_xlim(9.1,11.9) ax.set_yscale('symlog') ax.set_xticks(np.arange(9.5,12.,0.5)) ax.set_yticks(np.arange(0,12,1)) ax.set_yticklabels(np.arange(1,11,2)) ax.tick_params(axis='x', which='major', labelsize=16) if text == True: title_here = 'n = {0}'.format(titles[plot_idx]) ax.text(0.05, 0.95, title_here,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=18) if plot_idx == 4: ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=20) ax.plot(bin_centers,y_vals,color='silver') # In[153]: def plot_eco_meds(bin_centers,y_vals,low_lim,up_lim,ax,plot_idx,only=False): """ Returns six subplots showing the median Nth nearest neighbor distance for ECO galaxies in each mass bin Parameters ---------- bin_centers: array-like An array with the medians mass values of the mass bins y_vals: array-like An array containing the median distance values for each mass bin low_lim: array-like An array with the lower cut-off of the bootstrap errors for each median up_lim: array-like An array with the upper cut-off of the bootstrap errors for each median ax: axis-like A value which specifies which axis each subplot is to be plotted to plot_idx: integer-like Specifies which subplot the figure is plotted to. Used for the text label in each subplot Optional -------- only == False To be used when only plotting the ECO median trends, no mocks. 
Will add in the additional plotting specifications that would have been taken care of previously in a for-loop which plotted the mocks as well Returns ------- Subplots displaying the median distance to Nth nearest neighbor trends for each mass bin, with the bootstrap errors """ if only == True: titles = [1,2,3,5,10,20] ax.set_ylim(0,10**1.5) ax.set_xlim(9.1,11.9) ax.set_yscale('symlog') ax.set_xticks(np.arange(9.5,12.,0.5)) ax.tick_params(axis='both', which='major', labelsize=16) title_here = 'n = {0}'.format(titles[plot_idx]) ax.text(0.05, 0.95, title_here,horizontalalignment='left',\ verticalalignment='top',transform=ax.transAxes,fontsize=18) if plot_idx == 4: ax.set_xlabel('$\log\ (M_{*}/M_{\odot})$',fontsize=18) ax.errorbar(bin_centers,y_vals,yerr=0.1,lolims=low_lim, \ uplims=up_lim,color='maroon',label='ECO') # if plot_idx == 5: # ax.legend(loc='best') # In[378]: nrow_num_mass = int(2) ncol_num_mass = int(3) fig, axes = plt.subplots(nrows=nrow_num_mass, ncols=ncol_num_mass, \ figsize=(100,200), sharex= True, sharey = True) axes_flat = axes.flatten() fig.text(0.01, 0.5, 'Distance to Nth Neighbor (Mpc)', ha='center', \ va='center',rotation='vertical',fontsize=20) zz = int(0) while zz <=4: for ii in range(len(med_plot_arr)): for vv in range(len(nn_mass_dist)): if vv == 0: value = True else: value = False plot_all_meds(mock_meds_bins[vv][neigh_vals[ii]],\ med_plot_arr[neigh_vals[ii]][vv],axes_flat[zz],\ zz,text=value) plot_eco_meds(eco_med_bins[ii],eco_medians[ii][0],\ eco_medians[ii][1],eco_medians[ii][2],\ axes_flat[zz],zz) zz += 1 plt.subplots_adjust(left=0.05, bottom=0.09, right=0.98, top=0.98,\ hspace=0,wspace=0) # plt.savefig(figsave_path + r"\median_distances") plt.show()
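
# ---------------------------------------------------------------------------
# Sanity sketch (not part of the analysis above): calc_dens returns the
# number density of a sphere holding N+1 galaxies whose radius is the
# distance to the Nth nearest neighbour.
r_demo = np.array([1.0, 2.0])
# for n=3, r=1: 3*(3+1)/(4*pi*1**3) ~ 0.955 galaxies per unit volume
print(calc_dens(3, r_demo))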
hrichstein/Stellar_mass_env_Density
Codes/Scripts/centrals_plotting.py
Python
mit
51,428
[ "Galaxy" ]
2b662d0b3d17100f427609def4135d6a95caa22a72af4303a554ebe60ef180c4
import numpy as np __author__ = 'christopher' def has_adp(atoms): for a in ['adps', 'adp']: if a in atoms.info.keys(): return atoms.info[a] return None class ADP: def __init__(self, atoms, adps=None, adp_momenta=None, adp_equivalency=None, fixed_adps=None): """ Set up the atomic anisotropic displacement parameters. Parameters ---------- atoms: ASE.atoms The atomic configuration adps: np.ndarray The array of ADP values for the atomic configuration adp_momenta: np.ndarray The "momentum" for each ADP, note that this is not real momentum just a mathematical construct for the Hamiltonian dynamics adp_equivalency: np.ndarray An array which describes which ADPS are forced to be equivalent fixed_adps: np.ndarray An array which describes which adps are fixed Returns ------- """ if adps is None: adps = np.ones(atoms.positions.shape) * .005 if adp_momenta is None: adp_momenta = np.zeros(atoms.positions.shape) if adp_equivalency is None: adp_equivalency = np.arange(len(atoms) * 3).reshape( (len(atoms), 3)) if fixed_adps is None: fixed_adps = np.ones(atoms.positions.shape) self.adps = adps.copy() self.adp_momenta = adp_momenta.copy() self.adp_equivalency = adp_equivalency.copy() self.fixed_adps = fixed_adps.copy() self.calc = None def get_positions(self): """ Get the ADP values Returns ------- 2darray: The current ADPs """ return self.adps.copy() def set_positions(self, new_adps): """ Set the ADP positions in a manner consistent with the constraints Parameters ---------- new_adps: np.ndarray The new adp values Returns ------- """ delta_adps = new_adps.copy() - self.adps # Make all the equivalent adps the same unique_adps = np.unique(self.adp_equivalency) for i in unique_adps: delta_adps[np.where(self.adp_equivalency == i)] = np.mean( delta_adps[np.where(self.adp_equivalency == i)]) # No changes for the fixed adps, where fixed is zero delta_adps *= self.fixed_adps self.adps += delta_adps def get_momenta(self): """ Get the current ADP momenta Returns ------- 2darray: The current momentum """ return self.adp_momenta def set_momenta(self, new_momenta): """ Set the ADP momentum Parameters ---------- new_momenta: 2darray The new momentum values Returns ------- """ self.adp_momenta = new_momenta.copy() def get_velocities(self): return self.get_momenta() def get_forces(self, atoms): """ Get the forces on the ADPs from the APD calculator ..note:: It may seem a bit odd to have forces working on the ADPs since they aren't particle positions but a better way to think of it as a description of how the ADPs should change, in both magnitude and direction to best minimize the potential energy of the system (however that is calculated). Parameters ---------- atoms: ase.atoms The atomic configuration Returns ------- 2darray: The forces on each of the adps """ return self.calc.get_forces(atoms) def set_calculator(self, calc): """ Set the calculator for the ADPS, this will calculate the potential energy and forces associated with the ADPS Parameters ---------- calc Returns ------- """ self.calc = calc def del_adp(self, index): """ Delete an ADP from the system, usually this accompanies the deletion of an atom. Parameters ---------- index: int The index of the ADP to be deleted Returns ------- """ for a in [self.adps, self.adp_momenta, self.adp_equivalency, self.fixed_adps]: a = np.delete(a, index, 0) def add_adp(self, adp=None, adp_momentum=None, adp_equivalency=None, fixed_adp=None): """ Add an ADP to the system, usually this accompanies the addition of an atom. 
Parameters
        ----------
        adp: np.ndarray
            The new adp values
        adp_momentum: np.ndarray
            The momentum of the new adp
        adp_equivalency: int
            Which adps this adp is equivalent to
        fixed_adp: int
            The fixed values for the adps.

        Returns
        -------

        """
        if adp is None:
            adp = np.ones((1, 3)) * .005
        if adp_momentum is None:
            adp_momentum = np.zeros((1, 3))
        if adp_equivalency is None:
            adp_equivalency = np.arange(3) + np.max(self.adp_equivalency)
        if fixed_adp is None:
            fixed_adp = np.ones((1, 3))
        # Assign back to the attributes: rebinding a loop variable (as in
        # ``a = np.vstack([a, b])``) would silently discard the new row.
        self.adps = np.vstack([self.adps, adp])
        self.adp_momenta = np.vstack([self.adp_momenta, adp_momentum])
        self.adp_equivalency = np.vstack([self.adp_equivalency,
                                          adp_equivalency])
        self.fixed_adps = np.vstack([self.fixed_adps, fixed_adp])
CJ-Wright/pyIID
pyiid/adp.py
Python
bsd-3-clause
5,607
[ "ASE" ]
06ffe597dc9ef1ed1b3d06a54bdb6f009d79f743db1f240fa971809ce487abda
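A minimal usage sketch for the ADP container defined in pyiid/adp.py above, assuming pyiid and ASE are importable; the four-atom gold cluster and the 0.01 shift are illustrative values, not taken from the module.

# Illustrative only: exercising the ADP container from pyiid/adp.py above.
import numpy as np
from ase import Atoms
from pyiid.adp import ADP, has_adp

atoms = Atoms('Au4', positions=np.random.random((4, 3)) * 5.0)
adps = ADP(atoms)                        # every ADP defaults to 0.005
atoms.info['adps'] = adps                # has_adp(atoms) now finds the container
proposed = adps.get_positions() + 0.01   # propose a uniform shift of all ADPs
adps.set_positions(proposed)             # applied subject to the equivalency/fixed masks
print(has_adp(atoms).get_positions().mean())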
# # Copyright (C) 2010-2018 The ESPResSo project # Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010 # Max-Planck-Institute for Polymer Research, Theory Group # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from espressomd import assert_features, electrostatics, electrostatic_extensions from espressomd.shapes import Wall from espressomd.visualization_opengl import * import numpy from threading import Thread from time import sleep assert_features(["ELECTROSTATICS", "MASS", "LENNARD_JONES"]) system = espressomd.System(box_l=[1.0, 1.0, 1.0]) system.seed = system.cell_system.get_state()['n_nodes'] * [1234] numpy.random.seed(system.seed) print("\n--->Setup system") # System parameters n_part = 1000 n_ionpairs = n_part / 2 density = 1.1138 time_step = 0.001823 temp = 1198.3 gamma = 50 #l_bjerrum = 0.885^2 * e^2/(4*pi*epsilon_0*k_B*T) l_bjerrum = 130878.0 / temp Ez = 0 num_steps_equilibration = 500 # Particle parameters types = {"Cl": 0, "Na": 1, "Electrode": 2} numbers = {"Cl": n_ionpairs, "Na": n_ionpairs} charges = {"Cl": -1.0, "Na": 1.0} lj_sigmas = {"Cl": 3.85, "Na": 2.52, "Electrode": 3.37} lj_epsilons = {"Cl": 192.45, "Na": 17.44, "Electrode": 24.72} lj_cuts = {"Cl": 3.0 * lj_sigmas["Cl"], "Na": 3.0 * lj_sigmas["Na"], "Electrode": 3.0 * lj_sigmas["Electrode"]} masses = {"Cl": 35.453, "Na": 22.99, "Electrode": 12.01} # Setup System box_l = (n_ionpairs * sum(masses.values()) / density)**(1. / 3.) 
box_z = box_l + 2.0 * (lj_sigmas["Electrode"]) box_volume = box_l * box_l * box_z elc_gap = box_z * 0.15 system.box_l = [box_l, box_l, box_z + elc_gap] system.periodicity = [1, 1, 1] system.time_step = time_step system.cell_system.skin = 0.3 system.thermostat.set_langevin(kT=temp, gamma=gamma) # Visualizer visualizer = openGLLive(system, camera_position=[-3 * box_l, box_l * 0.5, box_l * 0.5], camera_right=[ 0, 0, 1], drag_force=5 * 298, background_color=[1, 1, 1], light_pos=[30, 30, 30], ext_force_arrows_scale=[0.0001], ext_force_arrows=False) # Walls system.constraints.add(shape=Wall( dist=0, normal=[0, 0, 1]), particle_type=types["Electrode"]) system.constraints.add(shape=Wall( dist=-box_z, normal=[0, 0, -1]), particle_type=types["Electrode"]) # Place particles for i in range(int(n_ionpairs)): p = numpy.random.random(3) * box_l p[2] += lj_sigmas["Electrode"] system.part.add(id=len(system.part), type=types["Cl"], pos=p, q=charges["Cl"], mass=masses["Cl"]) for i in range(int(n_ionpairs)): p = numpy.random.random(3) * box_l p[2] += lj_sigmas["Electrode"] system.part.add(id=len(system.part), type=types["Na"], pos=p, q=charges["Na"], mass=masses["Na"]) # Lennard-Jones interactions parameters def combination_rule_epsilon(rule, eps1, eps2): if rule == "Lorentz": return (eps1 * eps2)**0.5 else: return ValueError("No combination rule defined") def combination_rule_sigma(rule, sig1, sig2): if rule == "Berthelot": return (sig1 + sig2) * 0.5 else: return ValueError("No combination rule defined") for s in [["Cl", "Na"], ["Cl", "Cl"], ["Na", "Na"], ["Na", "Electrode"], ["Cl", "Electrode"]]: lj_sig = combination_rule_sigma( "Berthelot", lj_sigmas[s[0]], lj_sigmas[s[1]]) lj_cut = combination_rule_sigma("Berthelot", lj_cuts[s[0]], lj_cuts[s[1]]) lj_eps = combination_rule_epsilon( "Lorentz", lj_epsilons[s[0]], lj_epsilons[s[1]]) system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params( epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto") system.minimize_energy.init( f_max=10, gamma=10, max_steps=2000, max_displacement=0.1) system.minimize_energy.minimize() print("\n--->Tuning Electrostatics") p3m = electrostatics.P3M(bjerrum_length=l_bjerrum, accuracy=1e-2) system.actors.add(p3m) elc = electrostatic_extensions.ELC(gap_size=elc_gap, maxPWerror=1e-3) system.actors.add(elc) def increaseElectricField(): global Ez Ez += 1000 for p in system.part: p.ext_force = [0, 0, Ez * p.q] print(Ez) def decreaseElectricField(): global Ez Ez -= 1000 for p in system.part: p.ext_force = [0, 0, Ez * p.q] print(Ez) # Register buttons visualizer.keyboardManager.registerButton(KeyboardButtonEvent( 'u', KeyboardFireEvent.Hold, increaseElectricField)) visualizer.keyboardManager.registerButton(KeyboardButtonEvent( 'j', KeyboardFireEvent.Hold, decreaseElectricField)) def main(): print("\n--->Integration") system.time = 0.0 while True: system.integrator.run(1) visualizer.update() # Start simulation in separate thread t = Thread(target=main) t.daemon = True t.start() # Start blocking visualizer visualizer.start()
hmenke/espresso
doc/tutorials/02-charged_system/scripts/nacl_units_confined_vis.py
Python
gpl-3.0
5,420
[ "ESPResSo" ]
d698c66260da2cb190e4e035a4e4de0576d811affbd1cd02ca1e3af015a65206
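A note on the mixing rules in the confined NaCl script above: the numbers it computes are the standard Lorentz-Berthelot combination, but the rule-name strings are interchanged relative to the usual convention (the arithmetic mean of the sigmas is the Lorentz rule, the geometric mean of the epsilons the Berthelot rule), and an unrecognised rule is returned as a ValueError rather than raised. Below is a standalone sketch of the conventional form, fed with the Cl-Na pair from the tables above; the function name is illustrative, not part of the script.

# Conventional Lorentz-Berthelot mixing (sketch; the name is illustrative).
def lorentz_berthelot(sig_i, sig_j, eps_i, eps_j):
    sigma_ij = 0.5 * (sig_i + sig_j)      # Lorentz rule: arithmetic mean of sigmas
    epsilon_ij = (eps_i * eps_j) ** 0.5   # Berthelot rule: geometric mean of epsilons
    return sigma_ij, epsilon_ij

sig_ij, eps_ij = lorentz_berthelot(3.85, 2.52, 192.45, 17.44)  # Cl-Na pair from the script
print(sig_ij, eps_ij)  # 3.185 and roughly 57.9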
#!/usr/bin/env python # # @file GeneralFunctions.py # @brief class to create general functions # @author Frank Bergmann # @author Sarah Keating # # <!-------------------------------------------------------------------------- # # Copyright (c) 2013-2015 by the California Institute of Technology # (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK) # and the University of Heidelberg (Germany), with support from the National # Institutes of Health (USA) under grant R01GM070923. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # Neither the name of the California Institute of Technology (Caltech), nor # of the European Bioinformatics Institute (EMBL-EBI), nor of the University # of Heidelberg, nor the names of any contributors, may be used to endorse # or promote products derived from this software without specific prior # written permission. 
# ------------------------------------------------------------------------ --> from util import strFunctions, global_variables class GeneralFunctions(): """Class for general functions""" def __init__(self, language, is_cpp_api, is_list_of, class_object): self.language = language self.cap_language = language.upper() self.package = class_object['package'] self.class_name = class_object['name'] self.has_std_base = class_object['has_std_base'] self.base_class = class_object['baseClass'] self.is_cpp_api = is_cpp_api self.is_list_of = is_list_of self.is_plugin = False if 'is_plugin' in class_object: self.is_plugin = class_object['is_plugin'] self.is_doc_plugin = False if 'is_doc_plugin' in class_object: self.is_doc_plugin = class_object['is_doc_plugin'] self.ext_class = '' if self.is_plugin: self.ext_class = class_object['sbase'] if is_list_of: self.child_name = class_object['lo_child'] else: self.child_name = '' if is_cpp_api: self.object_name = self.class_name self.object_child_name = self.child_name else: if is_list_of: self.object_name = 'ListOf_t' else: self.object_name = self.class_name + '_t' self.object_child_name = self.child_name + '_t' self.element_name = '' self.override_name = False if 'elementName' in class_object and not is_list_of: self.element_name = class_object['elementName'] if self.element_name == '': self.override_name = False else: self.override_name = not \ strFunctions.compare_no_case(self.element_name, self.class_name) if not global_variables.is_package: self.override_name = True if is_list_of: self.element_name = \ strFunctions.lower_list_of_name_no_prefix(class_object['elementName']) else: self.element_name = class_object['elementName'] self.typecode = class_object['typecode'] self.attributes = class_object['class_attributes'] self.sid_refs = class_object['sid_refs'] self.unit_sid_refs = class_object['unit_sid_refs'] self.child_lo_elements = class_object['child_lo_elements'] self.child_elements = class_object['child_elements'] self.has_math = class_object['has_math'] self.has_array = class_object['has_array'] self.overwrites_children = class_object['overwrites_children'] self.has_children = class_object['has_children'] self.has_only_math = class_object['has_only_math'] self.num_non_std_children = class_object['num_non_std_children'] self.num_children = class_object['num_children'] self.std_base = class_object['std_base'] self.required = 'false' if 'is_doc_plugin' in class_object: if class_object['reqd']: self.required = 'true' self.document = False if 'document' in class_object: self.document = class_object['document'] # useful variables if not self.is_cpp_api and self.is_list_of: self.struct_name = self.object_child_name else: self.struct_name = self.object_name self.abbrev_parent = strFunctions.abbrev_name(self.object_name) if self.is_cpp_api is False: self.true = '@c 1' self.false = '@c 0' else: self.true = '@c true' self.false = '@c false' # status if self.is_cpp_api: if self.is_list_of: self.status = 'cpp_list' else: self.status = 'cpp_not_list' else: if self.is_list_of: self.status = 'c_list' else: self.status = 'c_not_list' ######################################################################## # Functions for writing renamesidref # function to write rename_sid_ref def write_rename_sidrefs(self): # only write is not list of and has sidrefs if not self.status == 'cpp_not_list': return elif len(self.sid_refs) == 0 and len(self.unit_sid_refs) == 0\ and not self.has_math: return # create comment parts title_line = '@copydoc doc_renamesidref_common' params = [] 
return_lines = [] additional = [] # create the function declaration function = 'renameSIdRefs' return_type = 'void' arguments = ['const std::string& oldid', 'const std::string& newid'] # create the function implementation code = [] for i in range(0, len(self.sid_refs)): ref = self.sid_refs[i] implementation = ['isSet{0}() && {1} == ' 'oldid'.format(ref['capAttName'], ref['memberName']), 'set{0}(newid)'.format(ref['capAttName'])] code.append(dict({'code_type': 'if', 'code': implementation})) for i in range(0, len(self.unit_sid_refs)): ref = self.unit_sid_refs[i] implementation = ['isSet{0}() && {1} == ' 'oldid'.format(ref['capAttName'], ref['memberName']), 'set{0}(newid)'.format(ref['capAttName'])] code.append(dict({'code_type': 'if', 'code': implementation})) if self.has_math: implementation = ['isSetMath()', 'mMath->renameSIdRefs(oldid, newid)'] code.append(self.create_code_block('if', implementation)) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) ######################################################################## # Functions for writing get element/typecode functionss # function to write getElement def write_get_element_name(self): if not self.is_cpp_api: return # create comment parts if self.override_name: name = self.element_name else: name = strFunctions.lower_first(self.object_name) title_line = 'Returns the XML element name of this {0} object.'\ .format(self.object_name,) params = ['For {0}, the XML element name is always @c ' '\"{1}\".'.format(self.object_name, name)] return_lines = ['@return the name of this element, i.e. 
@c \"{0}\"' '.'.format(name)] additional = [] # create the function declaration arguments = [] function = 'getElementName' return_type = 'const std::string&' # create the function implementation if self.overwrites_children: implementation = ['return mElementName'] else: implementation = ['static const string name = \"{0}\"'.format(name), 'return name'] code = [dict({'code_type': 'line', 'code': implementation})] # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write getTypeCode def write_get_typecode(self): if not self.is_cpp_api: return # create comment lib = global_variables.library_name; if self.cap_language == 'SBML' or self.cap_language == 'SEDML': lib = 'lib{0}'.format(self.cap_language) title_line = 'Returns the {0} type code for this {1} object.'\ .format(lib, self.object_name) params = ['@copydetails doc_what_are_typecodes'] return_lines = ['@return the {0} type code for this ' 'object:'.format(self.cap_language)] additional = [] if self.is_list_of: line = '@{0}constant{2}{1}_LIST_OF, ' \ '{1}TypeCode_t{3}'.format(self.language, self.cap_language, '{', '}') else: line = '@{0}constant{1}{2}, {3}{4}' \ 'TypeCode_t{5}'.format(self.language, '{', self.typecode, self.cap_language, self.package, '}') additional.append(line) additional.append(' ') additional.append('@copydetails doc_warning_typecodes_not_unique') if not self.is_list_of: additional.append(' ') additional.append('@see getElementName()') if global_variables.is_package: additional.append('@see getPackageName()') # create function declaration function = 'getTypeCode' arguments = [] return_type = 'int' # create the function implementation if self.is_list_of: implementation = ['return {0}_LIST_OF'.format(self.cap_language)] else: implementation = ['return {0}'.format(self.typecode)] code = [dict({'code_type': 'line', 'code': implementation})] # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write getTypeCode def write_get_item_typecode(self): # only needed for cpp list of class if not self.status == 'cpp_list': return # create comment title_line = 'Returns the lib{0} type code for the {0} objects ' \ 'contained in this {1} object.'.format(self.cap_language, self.object_name) params = ['@copydetails doc_what_are_typecodes'] return_lines = ['@return the {0} typecode for the ' 'objects contained in this ' '{1}:'.format(self.cap_language, self.object_name)] additional = [] line = '@{0}constant{1}{2}, {3}{4}TypeCode_t{5}' \ ''.format(self.language, '{', self.typecode, self.cap_language, self.package, '}') additional.append(line) additional.append(' ') additional.append('@copydetails doc_warning_typecodes_not_unique') additional.append(' ') additional.append('@see getElementName()') if global_variables.is_package: additional.append('@see getPackageName()') # create function declaration function = 'getItemTypeCode' arguments = [] return_type = 'int' # create the function implementation implementation = ['return {0}'.format(self.typecode)] code = [dict({'code_type': 'line', 'code': implementation})] # return the parts 
return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) ######################################################################## # Functions for writing checking necessary children status # function to write hasRequiredAttributes def write_has_required_attributes(self): if self.has_std_base and len(self.attributes) == 0: return # create comment parts title_line = 'Predicate returning {0} if all the required ' \ 'attributes for this {1} object have been set.'\ .format(self.true, self.object_name) params = [] if not self.is_cpp_api: params.append('@param {0} the {1} structure.' .format(self.abbrev_parent, self.object_name)) return_lines = ['@return {0} to indicate that all the required ' 'attributes of this {1} have been set, otherwise ' '{2} is returned.'.format(self.true, self.object_name, self.false)] additional = [' ', '@note The required attributes for the {0} object' ' are:'.format(self.object_name)] for i in range(0, len(self.attributes)): if self.attributes[i]['reqd']: att_name = self.attributes[i]['xml_name'] additional.append('@li \"{0}\"'.format(att_name)) # create the function declaration if self.is_cpp_api: function = 'hasRequiredAttributes' return_type = 'bool' else: function = '{0}_hasRequiredAttributes'.format(self.class_name) return_type = 'int' arguments = [] if not self.is_cpp_api: arguments.append('const {0} * {1}' .format(self.object_name, self.abbrev_parent)) # create the function implementation if self.is_cpp_api: if self.has_std_base: all_present = 'true' else: all_present = '{0}::hasRequired' \ 'Attributes()'.format(self.base_class) code = [dict({'code_type': 'line', 'code': ['bool all' 'Present = {0}'.format(all_present)]})] for i in range(0, len(self.attributes)): att = self.attributes[i] if att['reqd']: implementation = ['isSet{0}() == ' 'false'.format(att['capAttName']), 'allPresent = false'] code.append(dict({'code_type': 'if', 'code': implementation})) code.append(dict({'code_type': 'line', 'code': ['return allPresent']})) else: line = ['return ({0} != NULL) ? static_cast<int>({0}->' 'hasRequiredAttributes()) : 0'.format(self.abbrev_parent)] code = [dict({'code_type': 'line', 'code': line})] # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write hasRequiredElements def write_has_required_elements(self): if not self.has_children: return # create comment parts title_line = 'Predicate returning {0} if all the required ' \ 'elements for this {1} object have been set.'\ .format(self.true, self.object_name) params = [] if not self.is_cpp_api: params.append('@param {0} the {1} structure.' 
.format(self.abbrev_parent, self.object_name)) return_lines = ['@return {0} to indicate that all the required ' 'elements of this {1} have been set, otherwise ' '{2} is returned.'.format(self.true, self.object_name, self.false)] additional = [' ', '@note The required elements for the {0} object' ' are:'.format(self.object_name)] for i in range(0, len(self.child_elements)): if self.child_elements[i]['reqd']: additional.append('@li \"{0}\"' .format(self.child_elements[i]['name'])) for i in range(0, len(self.child_lo_elements)): if self.child_lo_elements[i]['reqd']: additional.append('@li \"{0}\"' .format(self.child_lo_elements[i]['name'])) # create the function declaration if self.is_cpp_api: function = 'hasRequiredElements' return_type = 'bool' else: function = '{0}_hasRequiredElements'.format(self.class_name) return_type = 'int' arguments = [] if not self.is_cpp_api: arguments.append('const {0} * {1}' .format(self.object_name, self.abbrev_parent)) # create the function implementation if self.is_cpp_api: if self.has_std_base: all_present = 'true' else: all_present = '{0}::hasRequired' \ 'Elements()'.format(self.base_class) code = [dict({'code_type': 'line', 'code': ['bool allPresent ' '= {0}'.format(all_present)]})] for i in range(0, len(self.child_elements)): att = self.child_elements[i] if att['reqd']: implementation = ['isSet{0}() == ' 'false'.format(att['capAttName']), 'allPresent = false'] code.append(dict({'code_type': 'if', 'code': implementation})) for i in range(0, len(self.child_lo_elements)): att = self.child_lo_elements[i] if att['reqd']: name = strFunctions.upper_first(att['pluralName']) implementation = ['getNum{0}() == ' '0'.format(name), 'allPresent = false'] code.append(dict({'code_type': 'if', 'code': implementation})) code.append(dict({'code_type': 'line', 'code': ['return allPresent']})) else: line = ['return ({0} != NULL) ? 
static_cast<int>({0}->' 'hasRequiredElements()) : 0'.format(self.abbrev_parent)] code = [dict({'code_type': 'line', 'code': line})] # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) ######################################################################## # Functions for writing general functions: writeElement, accept # setDocument, write (if we have an array) # function to write writeElement def write_write_elements(self): if not self.status == 'cpp_not_list': if not(self.status == 'cpp_list' and len(self.child_elements) > 0): return elif self.is_doc_plugin: return # create comment parts title_line = 'Write any contained elements' params = [] return_lines = [] additional = [] # create the function declaration function = 'writeElements' return_type = 'void' if global_variables.is_package: arguments = ['XMLOutputStream& stream'] else: arguments = ['LIBSBML_CPP_NAMESPACE_QUALIFIER XMLOutputStream& stream'] # create the function implementation base = self.base_class if not self.is_plugin: code = [dict({'code_type': 'line', 'code': ['{0}::writeElements(stream)'.format(base)]})] else: code = [] for i in range(0, len(self.child_elements)): att = self.child_elements[i] if att['element'] == 'ASTNode': if global_variables.is_package: line = ['writeMathML(getMath(), stream, get{0}' 'Namespaces())'.format(global_variables.prefix)] else: line = ['writeMathML(getMath(), stream, NULL)'] elif att['element'] == 'XMLNode': line = ['stream.startElement(\"{0}\")'.format(att['name']), 'stream << *{0}'.format(att['memberName']), 'stream.endElement(\"{0}\")'.format(att['name'])] else: line = ['{0}->write(stream)'.format(att['memberName'])] implementation = ['isSet{0}() == true'.format(att['capAttName'])] implementation += line code.append(dict({'code_type': 'if', 'code': implementation})) for i in range(0, len(self.child_lo_elements)): att = self.child_lo_elements[i] if self.is_plugin: name = att['pluralName'][6:] else: # hack for spatial csg elements if self.package == 'Spatial' and \ att['pluralName'].startswith('csg'): name = 'CSG' + att['pluralName'][3:] else: name = strFunctions.remove_prefix(strFunctions.upper_first(att['pluralName'])) if att['type'] == 'inline_lo_element': implementation = ['unsigned int i = 0; i < getNum{0}(); i++'.format(name), 'get{0}(i)->write(stream)'.format(strFunctions.singular(name))] code.append(dict({'code_type': 'for', 'code': implementation})) else: implementation = ['getNum{0}() > ' '0'.format(name), '{0}.write(stream)'.format(att['memberName'])] code.append(dict({'code_type': 'if', 'code': implementation})) if not self.is_plugin and global_variables.is_package: code.append(dict({'code_type': 'line', 'code': ['{0}::writeExtension' 'Elements' '(stream)'.format(self.std_base)]})) # look and see if we have a vector attribute which would need # to be written here for attrib in self.attributes: if 'isVector' in attrib and attrib['isVector']: code.append(self.write_write_vector(attrib)) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) def write_write_vector(self, attrib): implementation = 
['std::vector<{0}>::const_iterator it = {1}.begin(); ' 'it != {1}.end(); ++it'.format(attrib['element'], attrib['memberName']), 'stream.startElement(\"{0}\")'.format(attrib['name']), 'stream.setAutoIndent(false)', 'stream << \" \" << *it << \" \"', 'stream.endElement(\"{0}\")'.format(attrib['name']), 'stream.setAutoIndent(true)'] nested_for = self.create_code_block('for', implementation) implementation = ['has{0}()'.format(strFunctions.plural(attrib['capAttName'])), nested_for] code = self.create_code_block('if', implementation) return code # function to write accept def write_accept(self): if not self.status == 'cpp_not_list': return # create comment parts title_line = 'Accepts the given ' \ '{0}Visitor'.format(global_variables.prefix) params = [] return_lines = [] additional = [] # create the function declaration function = 'accept' return_type = 'bool' arguments = ['{0}Visitor& v'.format(global_variables.prefix)] # create the function implementation simple = False # cover cases where a doc plugin is used (no children but not simple) # or there are children but they are non std based children (simple) if self.has_children: if self.num_children == self.num_non_std_children: simple = True else: if not self.is_plugin: simple = True if not global_variables.is_package: implementation = ['return false'] code = [dict({'code_type': 'line', 'code': implementation})] elif simple: implementation = ['return v.visit(*this)'] code = [dict({'code_type': 'line', 'code': implementation})] else: if not self.is_plugin: code = [dict({'code_type': 'line', 'code': ['v.visit(*this)']})] else: obj = strFunctions.abbrev_name(self.ext_class) implementation = ['const {0}* {1} = static_cast<const {0}*>' '(this->getParent{2}Object()' ')'.format(self.ext_class, obj, self.cap_language), 'v.visit(*{0})'.format(obj), 'v.leave(*{0})'.format(obj)] code = [self.create_code_block('line', implementation)] for i in range(0, len(self.child_elements)): elem = self.child_elements[i] implementation = ['{0} != NULL'.format(elem['memberName']), '{0}->accept(v)'.format(elem['memberName'])] code.append(dict({'code_type': 'if', 'code': implementation})) for i in range(0, len(self.child_lo_elements)): att = self.child_lo_elements[i] implementation = ['{0}.accept(v)'.format(att['memberName'])] code.append(dict({'code_type': 'line', 'code': implementation})) if not self.is_plugin: code.append(dict({'code_type': 'line', 'code': ['v.leave(*this)', 'return true']})) else: code.append(self.create_code_block('line', ['return true'])) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write setDocument def write_set_document(self): if not self.status == 'cpp_not_list': return elif self.is_doc_plugin: return # create comment parts title_line = 'Sets the parent ' \ '{0}'.format(global_variables.document_class) params = [] return_lines = [] additional = [] # create the function declaration function = 'set{0}'.format(global_variables.document_class) return_type = 'void' arguments = ['{0}* d'.format(global_variables.document_class)] # create the function implementation if self.base_class: line = '{0}::set{1}(d)'.format(self.base_class, global_variables.document_class) implementation = [line] code = [dict({'code_type': 'line', 'code': implementation})] else: code = [] if self.has_children and 
not self.has_only_math: for i in range(0, len(self.child_elements)): att = self.child_elements[i] if 'is_ml' in att and att['is_ml']: continue else: implementation = ['{0} != NULL'.format(att['memberName']), '{0}->{1}' '(d)'.format(att['memberName'], function)] code.append(self.create_code_block('if', implementation)) for i in range(0, len(self.child_lo_elements)): att = self.child_lo_elements[i] implementation = ['{0}.{1}' '(d)'.format(att['memberName'], function)] code.append(dict({'code_type': 'line', 'code': implementation})) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write_write if there is an array def write_write(self): if not self.has_array: return elif not self.status == 'cpp_not_list': return # create comment parts title_line = 'used to write arrays' params = [] return_lines = [] additional = [] # create the function declaration function = 'write' return_type = 'void' if global_variables.is_package: arguments = ['XMLOutputStream& stream'] else: arguments = ['LIBSBML_CPP_NAMESPACE_QUALIFIER XMLOutputStream& stream'] # create the function implementation # find the array attribute name = '' member = '' array_type = '' for attrib in self.attributes: if attrib['isArray']: name = attrib['capAttName'] member = attrib['memberName'] array_type = attrib['element'] if array_type == 'int': array_type = 'long' code = [self.create_code_block('line', ['stream.startElement(getElementName(), ' 'getPrefix())', 'writeAttributes(stream)'])] nested_for = self.create_code_block( 'for', ['int i = 0; i < m{0}Length; ++i'.format(name), 'stream << ({0}){1}[i] << \" \"' ''.format(array_type, member)]) implementation = ['isSet{0}()'.format(name), nested_for] code.append(self.create_code_block('if', implementation)) code.append(self.create_code_block( 'line', ['stream.endElement(getElementName(), getPrefix())'])) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) ######################################################################## # Functions for dealing with packages: enablePackage, connectToChild # function to write enable_package def write_enable_package(self): if not self.status == 'cpp_not_list': return elif self.is_doc_plugin: return # create comment parts title_line = 'Enables/disables the given package with this element' params = [] return_lines = [] additional = [] # create the function declaration function = 'enablePackageInternal' return_type = 'void' arguments = ['const std::string& pkgURI', 'const std::string& pkgPrefix', 'bool flag'] # create the function implementation code = [] if not self.is_plugin and self.base_class: implementation = ['{0}::enablePackageInternal(pkgURI, pkgPrefix, ' 'flag)'.format(self.base_class)] code = [dict({'code_type': 'line', 'code': implementation})] if self.has_children and not self.has_only_math: for i in range(0, len(self.child_elements)): att = self.child_elements[i] if 'is_ml' in att and att['is_ml']: continue else: implementation = ['isSet{0}()'.format(att['capAttName']), '{0}->enablePackageInternal' '(pkgURI, pkgPrefix, ' 
'flag)'.format(att['memberName'])] code.append(dict({'code_type': 'if', 'code': implementation})) for i in range(0, len(self.child_lo_elements)): att = self.child_lo_elements[i] implementation = ['{0}.enablePackageInternal' '(pkgURI, pkgPrefix, ' 'flag)'.format(att['memberName'])] code.append(dict({'code_type': 'line', 'code': implementation})) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write connectToChild def write_connect_to_child(self): if not self.is_cpp_api: return elif not self.has_children: return # create comment parts title_line = 'Connects to child elements' params = [] return_lines = [] additional = [] # create the function declaration function = 'connectToChild' return_type = 'void' arguments = [] # create the function implementation if not self.is_plugin: implementation = ['{0}::connectToChild()'.format(self.base_class)] code = [dict({'code_type': 'line', 'code': implementation})] for i in range(0, len(self.child_elements)): att = self.child_elements[i] if 'is_ml' in att and att['is_ml']: continue else: implementation = ['{0} != NULL'.format(att['memberName']), '{0}->connectToParent' '(this)'.format(att['memberName'])] code.append(self.create_code_block('if', implementation)) for i in range(0, len(self.child_lo_elements)): att = self.child_lo_elements[i] implementation = ['{0}.connectToParent' '(this)'.format(att['memberName'])] code.append(dict({'code_type': 'line', 'code': implementation})) else: code = [self.create_code_block('line', ['connectToParent(getParent' '{0}Object()' ')'.format(self.cap_language)])] # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write connectToParent def write_connect_to_parent(self): if not self.is_cpp_api: return elif not self.has_children: return # create comment parts title_line = 'Connects to parent element' params = [] return_lines = [] additional = [] # create the function declaration function = 'connectToParent' return_type = 'void' arguments = ['{0}* base'.format(self.std_base)] # create the function implementation implementation = ['{0}::connectToParent(base)'.format(self.base_class)] code = [dict({'code_type': 'line', 'code': implementation})] for i in range(0, len(self.child_elements)): att = self.child_elements[i] if 'is_ml' in att and att['is_ml']: continue else: implementation = ['{0} != NULL'.format(att['memberName']), '{0}->connectToParent' '(base)'.format(att['memberName'])] code.append(self.create_code_block('if', implementation)) for i in range(0, len(self.child_lo_elements)): att = self.child_lo_elements[i] implementation = ['{0}.connectToParent' '(base)'.format(att['memberName'])] code.append(dict({'code_type': 'line', 'code': implementation})) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) 
######################################################################## # Functions for when an element has a different XML name # function to write setElementName def write_set_element_name(self): if not self.is_cpp_api: return if not self.overwrites_children: return # create comment parts title_line = 'Sets the XML name of this {0} object.'\ .format(self.object_name,) params = [] return_lines = [] additional = [] # create the function declaration arguments = ['const std::string& name'] function = 'setElementName' return_type = 'void' # create the function implementation implementation = ['mElementName = name'] code = [dict({'code_type': 'line', 'code': implementation})] # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) ######################################################################## # Functions for document plugin # function to write is comp flattening done def write_is_comp_flat(self): if not self.is_doc_plugin: return # create comment parts title_line = 'Predicate indicating whether \'comp\' flattening has ' \ 'been implemented for the {0} package.' \ ''.format(self.package) params = [] return_lines = [] additional = [] # create the function declaration arguments = [] function = 'isCompFlatteningImplemented' return_type = 'bool' # create the function implementation code = [dict({'code_type': 'line', 'code': ['return false']})] # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': True, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write check consistency def write_check_consistency(self): if not self.is_doc_plugin: return # create comment parts title_line = 'Calls check consistency for any relevant ' \ '{0} validators.'.format(self.package) params = [] return_lines = [] additional = [] # create the function declaration arguments = [] function = 'checkConsistency' return_type = 'unsigned int' # create the function implementation implementation = ['unsigned int nerrors = 0', 'unsigned int total_errors = 0'] code = [self.create_code_block('line', implementation)] implementation = ['{0}* doc = static_cast<{0}*>(this->' 'getParent{1}' 'Object())'.format(global_variables.document_class, self.cap_language), '{0}ErrorLog* log = doc->getError' 'Log()'.format(self.cap_language)] code.append(self.create_code_block('line', implementation)) implementation = ['unsigned char applicableValidators = ' 'doc->getApplicableValidators()', 'bool id = ((applicableValidators & 0x01) ==0x01)', 'bool core = ((applicableValidators & 0x02) ==0x02)'] code.append(self.create_code_block('line', implementation)) implementation = ['{0}IdentifierConsistencyValidator ' 'id_validator'.format(self.package), '{0}ConsistencyValidator ' 'core_validator'.format(self.package)] code.append(self.create_code_block('line', implementation)) implementation = self.get_validator_block('id') code.append(self.create_code_block('if', implementation)) implementation = self.get_validator_block('core') code.append(self.create_code_block('if', implementation)) code.append(self.create_code_block('line', ['return total_errors'])) # return the parts return dict({'title_line': title_line, 
'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) # function to write read attributes # note not the standard read attributes function; this is specific to # the document plugin def write_read_attributes(self): if not self.is_doc_plugin: return # sort error names to be used error = '{0}AttributeRequiredMustBeBoolean'.format(self.package) req_error = '{0}AttributeRequiredMissing'.format(self.package) value_error = '{0}AttributeRequiredMustHaveValue'.format(self.package) # create comment parts title_line = 'Reads the {0} attributes in the top-level ' \ 'element.'.format(self.package) params = [] return_lines = [] additional = [] # create the function declaration if global_variables.is_package: arguments = ['const XMLAttributes& attributes', 'const ExpectedAttributes& expectedAttributes'] else: arguments = ['const LIBSBML_CPP_NAMESPACE_QUALIFIER XMLAttributes& attributes', 'const LIBSBML_CPP_NAMESPACE_QUALIFIER ExpectedAttributes& expectedAttributes'] function = 'readAttributes' return_type = 'void' # create the function implementation implementation = ['get{0}() != NULL && get{0}()->' 'getLevel() < ' '3'.format(global_variables.document_class), 'return'] code = [dict({'code_type': 'if', 'code': implementation})] if global_variables.is_package: triple = 'XMLTriple' else: triple = 'LIBSBML_CPP_NAMESPACE_QUALIFIER XMLTriple' implementation = ['{0}ErrorLog* log = getErrorLog' '()'.format(self.cap_language), 'unsigned int numErrs = log->getNumErrors()', '{0} tripleReqd(\"required\", mURI, ' 'getPrefix())'.format(triple), 'bool assigned = attributes.readInto(tripleReqd, ' 'mRequired)'] code.append(self.create_code_block('line', implementation)) implementation = ['log->getNumErrors() == numErrs + 1 && ' 'log->contains(XMLAttributeTypeMismatch)', 'log->remove(XMLAttributeTypeMismatch)', 'log->logPackageError(\"{0}\", {1}, ' 'getPackageVersion(), getLevel(), ' 'getVersion())'.format(self.package.lower(), error), 'else', 'log->logPackageError(\"{0}\", {1}, ' 'getPackageVersion(), getLevel(), ' 'getVersion())'.format(self.package.lower(), req_error) ] nested_if = self.create_code_block('if_else', implementation) implementation = ['mRequired != {0}'.format(self.required), 'log->logPackageError(\"{0}\", {1}, ' 'getPackageVersion(), getLevel(), ' 'getVersion())'.format(self.package.lower(), value_error) ] second_nested_if = self.create_code_block('if', implementation) implementation = ['assigned == false', nested_if, 'else', 'mIsSetRequired = true', second_nested_if] code.append(self.create_code_block('if_else', implementation)) # return the parts return dict({'title_line': title_line, 'params': params, 'return_lines': return_lines, 'additional': additional, 'function': function, 'return_type': return_type, 'arguments': arguments, 'constant': False, 'virtual': True, 'object_name': self.struct_name, 'implementation': code}) ######################################################################## # HELPER FUNCTIONS def get_validator_block(self, valid_id): bail_if = self.create_code_block('if', ['log->getNumFailsWithSeverity(LIB{0}' '_SEV_ERROR) > ' '0'.format(self.cap_language), 'return total_errors']) errors_if = self.create_code_block('if', ['nerrors > 0', 'log->add({0}_validator.get' 'Failures())'.format(valid_id), bail_if]) code_block = ['{0}'.format(valid_id), '{0}_validator.init()'.format(valid_id), 'nerrors = 
{0}_validator.validate(*doc)'.format(valid_id), 'total_errors += nerrors', errors_if] return code_block @staticmethod def create_code_block(code_type, lines): code = dict({'code_type': code_type, 'code': lines}) return code
hovo1990/deviser
generator/code_files/cpp_functions/GeneralFunctions.py
Python
lgpl-2.1
53,640
[ "VisIt" ]
187baa5fec0fec4f73f0f74758b80f41d9001ed7eefe570c19f2ebc9457b57f3
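The writer methods in GeneralFunctions above all return the same function-description dictionary and assemble their bodies from nested 'code block' dictionaries via create_code_block. A minimal sketch of that convention follows; the content is made up and does not correspond to a real libSBML class.

# Illustrative only: the dictionary shapes produced by the writers above.
def create_code_block(code_type, lines):
    return {'code_type': code_type, 'code': lines}

setup = create_code_block('line', ['bool allPresent = true'])
check = create_code_block('if', ['isSetId() == false', 'allPresent = false'])
finish = create_code_block('line', ['return allPresent'])

function_description = {
    'title_line': 'Predicate returning true if all required attributes are set.',
    'params': [], 'return_lines': [], 'additional': [],
    'function': 'hasRequiredAttributes', 'return_type': 'bool',
    'arguments': [], 'constant': True, 'virtual': True,
    'object_name': 'Example_t', 'implementation': [setup, check, finish],
}
print(function_description['implementation'][1]['code_type'])  # 'if'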
from . import speg_visitor as sv from . import speg_parser as sp from . import rd_parser as rd from . import exceptions as ex class SPEG(object): def __init__(self): self.parser = sp.SimplePegParser() self.visitor = sv.SimplePegActionsVisitor(SimplePegActions()) self.speg_parser = None def parse_grammar(self, grammar): self.speg_parser = None speg_ast = self.parser.parse(grammar) if speg_ast: self.speg_parser = self.visitor.visit(speg_ast) else: raise ex.GrammarParseError('Failed to parse grammar: \n\n' + self.parser.get_last_error()) def parse_text(self, text): if self.speg_parser: rules = self.speg_parser.children first_rule = rules[0] first_rule_parser = first_rule.parser state = rd.State(text=text, rules=rules) ast = first_rule_parser(state) if ast: return ast else: raise ex.TextParseError('Failed to parse text: \n\n' + rd.get_last_error(state)) else: raise Exception('You need grammar to parse text. Call parseGrammar first') def parse(self, grammar, text): speg_ast = self.parser.parse(grammar) if speg_ast: visitor = sv.SimplePegActionsVisitor(SimplePegActions()) generated_parser = visitor.visit(speg_ast) rules = generated_parser.children first_rule = rules[0] first_rule_parser = first_rule.parser state = rd.State(text=text, rules=rules) ast = first_rule_parser(state) if ast: return ast else: raise ex.TextParseError('Failed to parse text: \n\n' + rd.get_last_error(state)) else: raise ex.GrammarParseError('Failed to parse grammar: \n\n' + self.parser.get_last_error()) class SimplePegActions(object): def noop(self, node): return node def peg(self, node): return node.children[3] def parsing_body(self, node): node.children = [child.children[0] for child in node.children] return node def parsing_rule(self, node): rule = node.children[4] return rd.Node( name=node.children[0].match, parser=rule ) def parsing_expression(self, node): return node.children[0] def parsing_sequence(self, node): head = [node.children[0].children[0]] tail = [child.children[1].children[0] for child in node.children[1].children] return rd.sequence(head + tail) def parsing_ordered_choice(self, node): head = [node.children[0]] tail = [child.children[3] for child in node.children[1].children] return rd.ordered_choice(head + tail) def parsing_sub_expression(self, node): return node.children[0] def parsing_group(self, node): return node.children[2] def parsing_atomic_expression(self, node): return node.children[0] def parsing_not_predicate(self, node): return rd.not_predicate(node.children[1].children[0]) def parsing_and_predicate(self, node): return rd.and_predicate(node.children[1].children[0]) def parsing_zero_or_more(self, node): return rd.zero_or_more(node.children[0].children[0]) def parsing_one_or_more(self, node): return rd.one_or_more(node.children[0].children[0]) def parsing_optional(self, node): return rd.optional(node.children[0].children[0]) def parsing_string(self, node): return rd.string(node.children[1].match) def parsing_regex_char(self, node): return rd.regex_char(node.children[0].match) def parsing_rule_call(self, node): return rd.call_rule_by_name(node.match) def parsing_end_of_file(self, node): return rd.end_of_file()
SimplePEG/Python
simplepeg/speg.py
Python
mit
3,909
[ "VisIt" ]
bcf727c610f4369b2bc01c6a6165ae15c123a0dbc5f6055605fe9a835f942209
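A sketch of the intended call sequence for the SPEG facade above; the import paths follow the repository layout (simplepeg/speg.py and the exceptions module it imports), and the grammar and input strings are left as placeholders because the grammar syntax is defined elsewhere in the package.

# Illustrative only: driving the SPEG facade above.
from simplepeg.speg import SPEG
from simplepeg import exceptions as ex

speg = SPEG()
grammar = "..."  # a SimplePEG grammar string (syntax not reproduced here)
text = "..."     # input to be parsed by the grammar's first rule
try:
    speg.parse_grammar(grammar)   # builds the rule parsers or raises GrammarParseError
    print(speg.parse_text(text))  # parses with the first rule or raises TextParseError
except (ex.GrammarParseError, ex.TextParseError) as err:
    print(err)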
#!/usr/bin/env python3 import argparse import sys import os import pysam import re from pyfastaq import * import assembly_tools.fill_gaps_using_reference class Error (Exception): pass def make_fasta_of_gap_flanks(fasta_in, flanking_bases, fasta_out, gaps): '''Makes a fasta file of the sequences flanking the gaps in a fasta/q file''' tmp_tabfile = fasta_out + '.tmp.tab' tasks.get_seqs_flanking_gaps(fasta_in, tmp_tabfile, flanking_bases, flanking_bases) fin = utils.open_file_read(tmp_tabfile) fout = utils.open_file_write(fasta_out) original_line_length = sequences.Fasta.line_length sequences.Fasta.line_length = 0 for line in fin: if line.startswith('#'): continue gap = assembly_tools.fill_gaps_using_reference.gap.Gap(line) print(gap.left_fasta(), file=fout) print(gap.right_fasta(), file=fout) if gap.query_name not in gaps: gaps[gap.query_name] = {} gaps[gap.query_name][(gap.query_start, gap.query_end)] = gap utils.close(fin) utils.close(fout) os.unlink(tmp_tabfile) sequences.Fasta.line_length = original_line_length def paired_hit_samreader(filename): '''Given a SAM file in read name order, yields a tuple of hits ([left_hits], [right_hits])''' samfile = pysam.Samfile(filename, "r") left_hits = [] right_hits = [] for samrecord in samfile.fetch(until_eof=True): if len(left_hits) == 0: if not samrecord.qname.endswith('.left'): raise Error('Expecting to get a "left" read in SAM but got this:' + samrecord.qname) sys.exit(1) left_hits.append(samrecord) elif len(right_hits) > 0 and samrecord.qname.endswith('.left'): yield left_hits, right_hits, samfile left_hits = [samrecord] right_hits = [] elif samrecord.qname.endswith('.left'): left_hits.append(samrecord) elif samrecord.qname.endswith('.right'): right_hits.append(samrecord) else: raise Error('Unexpected error parsing SAM file. Cannot continue') sys.exit(1) yield left_hits, right_hits, samfile def _gap_flank_seqname_to_dict_key(name): regex = re.compile('^(.*):(\d+)-(\d+)\.(?:left|right)') hits = regex.search(name) try: name = hits.group(1) gap_start = int(hits.group(2)) - 1 gap_end = int(hits.group(3)) - 1 except: raise Error('Error getting gap start/end coords from sequence with name ' + name) return name, (gap_start, gap_end) def parse_sam_file(samfilename, gaps): samreader = paired_hit_samreader(samfilename) for left_hits, right_hits, samfile in samreader: # find gap corresponding to this pair of reads qry, coords = _gap_flank_seqname_to_dict_key(left_hits[0].qname) try: gaps[qry][coords].update_hits(left_hits, right_hits, samfile) except: raise Error('Error parsing line of SAM ' + left_hits[0])
martinghunt/Assembly_tools
assembly_tools/fill_gaps_using_reference/helper.py
Python
gpl-3.0
3,048
[ "pysam" ]
28467004dc2e0615db91ea2441717683879d227f846269c99033117b2ab76590
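To make the read-naming convention used by the gap-filling helpers above concrete, here is the same regular expression applied to a made-up flank name; the contig name and coordinates are illustrative only.

# Illustrative only: the "<name>:<start>-<end>.left/.right" scheme written by
# make_fasta_of_gap_flanks and decoded by _gap_flank_seqname_to_dict_key above.
import re

regex = re.compile(r'^(.*):(\d+)-(\d+)\.(?:left|right)')
hits = regex.search('contig_7:1500-1620.left')  # made-up read name
name = hits.group(1)                            # 'contig_7'
coords = (int(hits.group(2)) - 1, int(hits.group(3)) - 1)  # converted to 0-based: (1499, 1619)
print(name, coords)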
""" Getting started with Captum - Titanic Data Analysis """ # Initial imports import numpy as np import torch from captum.attr import IntegratedGradients from captum.attr import LayerConductance from captum.attr import NeuronConductance import matplotlib.pyplot as plt import pandas as pd from scipy import stats import mlflow from prettytable import PrettyTable from sklearn.model_selection import train_test_split import os from argparse import ArgumentParser import torch.nn as nn def get_titanic(): """ we now preprocess the data by converting some categorical features such as gender, location of embarcation, and passenger class into one-hot encodings We also remove some features that are more difficult to analyze After processing, the features we have are: Age: Passenger Age Sibsp: Number of Siblings / Spouses Aboard Parch: Number of Parents / Children Aboard Fare: Fare Amount Paid in British Pounds Female: Binary variable indicating whether passenger is female Male: Binary variable indicating whether passenger is male EmbarkC : Binary var indicating whether passenger embarked @ Cherbourg EmbarkQ : Binary var indicating whether passenger embarked @ Queenstown EmbarkS : Binary var indicating whether passenger embarked @ Southampton Class1 : Binary var indicating whether passenger was in first class Class2 : Binary var indicating whether passenger was in second class Class3 : Binary var indicating whether passenger was in third class url = "https://biostat.app.vumc.org/wiki/pub/Main/DataSets/titanic3.csv" """ url = "https://biostat.app.vumc.org/wiki/pub/Main/DataSets/titanic3.csv" titanic_data = pd.read_csv(url) titanic_data = pd.concat( [ titanic_data, pd.get_dummies(titanic_data["sex"]), pd.get_dummies(titanic_data["embarked"], prefix="embark"), pd.get_dummies(titanic_data["pclass"], prefix="class"), ], axis=1, ) titanic_data["age"] = titanic_data["age"].fillna(titanic_data["age"].mean()) titanic_data["fare"] = titanic_data["fare"].fillna(titanic_data["fare"].mean()) titanic_data = titanic_data.drop( [ "name", "ticket", "cabin", "boat", "body", "home.dest", "sex", "embarked", "pclass", ], axis=1, ) return titanic_data torch.manual_seed(1) # Set seed for reproducibility. 
class TitanicSimpleNNModel(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(12, 12) self.sigmoid1 = nn.Sigmoid() self.linear2 = nn.Linear(12, 8) self.sigmoid2 = nn.Sigmoid() self.linear3 = nn.Linear(8, 2) self.softmax = nn.Softmax(dim=1) def forward(self, x): lin1_out = self.linear1(x) sigmoid_out1 = self.sigmoid1(lin1_out) sigmoid_out2 = self.sigmoid2(self.linear2(sigmoid_out1)) return self.softmax(self.linear3(sigmoid_out2)) def prepare(): RANDOM_SEED = 42 titanic_data = get_titanic() labels = titanic_data["survived"].to_numpy() titanic_data = titanic_data.drop(["survived"], axis=1) feature_names = list(titanic_data.columns) data = titanic_data.to_numpy() # Separate training and test sets using train_features, test_features, train_labels, test_labels = train_test_split( data, labels, test_size=0.3, random_state=RANDOM_SEED, stratify=labels ) return (train_features, train_labels, test_features, test_labels, feature_names) def count_model_parameters(model): table = PrettyTable(["Modules", "Parameters"]) total_params = 0 for name, parameter in model.named_parameters(): if not parameter.requires_grad: continue param = parameter.nonzero(as_tuple=False).size(0) table.add_row([name, param]) total_params += param return table, total_params def visualize_importances( feature_names, importances, title="Average Feature Importances", plot=True, axis_title="Features", ): print(title) feature_imp = PrettyTable(["feature_name", "importances"]) feature_imp_dict = {} for i in range(len(feature_names)): print(feature_names[i], ": ", "%.3f" % (importances[i])) feature_imp.add_row([feature_names[i], importances[i]]) feature_imp_dict[str(feature_names[i])] = importances[i] x_pos = np.arange(len(feature_names)) if plot: fig, ax = plt.subplots(figsize=(12, 6)) ax.bar(x_pos, importances, align="center") ax.set(title=title, xlabel=axis_title) ax.set_xticks(x_pos) ax.set_xticklabels(feature_names, rotation="vertical") mlflow.log_figure(fig, title + ".png") return feature_imp, feature_imp_dict def train(USE_PRETRAINED_MODEL=False): net = TitanicSimpleNNModel() train_features, train_labels, test_features, test_labels, feature_names = prepare() USE_PRETRAINED_MODEL = dict_args["use_pretrained_model"] if USE_PRETRAINED_MODEL: net.load_state_dict(torch.load("models/titanic_state_dict.pt")) net.eval() print("Model Loaded!") else: criterion = nn.CrossEntropyLoss() num_epochs = dict_args["max_epochs"] mlflow.log_param("epochs", num_epochs) mlflow.log_param("lr", dict_args["lr"]) optimizer = torch.optim.Adam(net.parameters(), lr=dict_args["lr"]) input_tensor = torch.from_numpy(train_features).type(torch.FloatTensor) label_tensor = torch.from_numpy(train_labels) for epoch in range(num_epochs): output = net(input_tensor) loss = criterion(output, label_tensor) optimizer.zero_grad() loss.backward() optimizer.step() if epoch % 50 == 0: print( "Epoch {}/{} => Train Loss: {:.2f}".format(epoch + 1, num_epochs, loss.item()) ) mlflow.log_metric( "Epoch {} Loss".format(str(epoch + 1)), float(loss.item()), step=epoch, ) if not os.path.isdir("models"): os.makedirs("models") torch.save(net.state_dict(), "models/titanic_state_dict.pt") summary, _ = count_model_parameters(net) mlflow.log_text(str(summary), "model_summary.txt") return ( net, train_features, train_labels, test_features, test_labels, feature_names, ) def compute_accuracy(net, features, labels, title=None): input_tensor = torch.from_numpy(features).type(torch.FloatTensor) out_probs = net(input_tensor).detach().numpy() out_classes = 
np.argmax(out_probs, axis=1)
    mlflow.log_metric(title, float(sum(out_classes == labels) / len(labels)))
    print(title, sum(out_classes == labels) / len(labels))
    return input_tensor


def feature_conductance(net, test_input_tensor):
    """
    The method takes tensor(s) of input examples (matching the forward function of the
    model), and returns the input attributions for the given input example.
    The returned values of the attribute method are the attributions, which match the
    size of the given inputs, and delta, which approximates the error between the
    approximated integral and the true integral.
    This method saves the distribution of average attributions of the trained features
    for the given target.
    """
    ig = IntegratedGradients(net)
    test_input_tensor.requires_grad_()
    attr, _ = ig.attribute(test_input_tensor, target=1, return_convergence_delta=True)
    attr = attr.detach().numpy()
    # To understand these attributions, we can first average them across all the inputs
    # and print and visualize the average attribution for each feature.
    feature_imp, feature_imp_dict = visualize_importances(feature_names, np.mean(attr, axis=0))
    mlflow.log_metrics(feature_imp_dict)
    mlflow.log_text(str(feature_imp), "feature_imp_summary.txt")
    fig, (ax1, ax2) = plt.subplots(2, 1)
    fig.tight_layout(pad=3)
    ax1.hist(attr[:, 1], 100)
    ax1.set(title="Distribution of Sibsp Attribution Values")
    # We can bucket the examples by the value of the sibsp feature and plot the average
    # attribution for the feature. In the plot below, the size of the dot is proportional
    # to the number of examples with that value.
    bin_means, bin_edges, _ = stats.binned_statistic(
        test_features[:, 1], attr[:, 1], statistic="mean", bins=6
    )
    bin_count, _, _ = stats.binned_statistic(
        test_features[:, 1], attr[:, 1], statistic="count", bins=6
    )
    bin_width = bin_edges[1] - bin_edges[0]
    bin_centers = bin_edges[1:] - bin_width / 2
    ax2.scatter(bin_centers, bin_means, s=bin_count)
    ax2.set(xlabel="Average Sibsp Feature Value", ylabel="Average Attribution")
    mlflow.log_figure(fig, "Average_Sibsp_Feature_Value.png")


def layer_conductance(net, test_input_tensor):
    """
    To use Layer Conductance, we create a LayerConductance object passing in the model
    as well as the module (layer) whose output we would like to understand.
    In this case, we choose net.sigmoid1, the output of the first hidden layer.
    Now obtain the conductance values for all the test examples by calling attribute
    on the LayerConductance object.
    LayerConductance also requires a target index for networks with multiple outputs,
    defining the index of the output for which gradients are computed.
    Similar to feature attributions, we provide target = 1, corresponding to survival.
    LayerConductance also utilizes a baseline, but we simply use the default zero
    baseline as in integrated gradients.
    """
    cond = LayerConductance(net, net.sigmoid1)
    cond_vals = cond.attribute(test_input_tensor, target=1)
    cond_vals = cond_vals.detach().numpy()
    # We can begin by visualizing the average conductance for each neuron.
    neuron_names = ["neuron " + str(x) for x in range(12)]
    avg_neuron_imp, neuron_imp_dict = visualize_importances(
        neuron_names,
        np.mean(cond_vals, axis=0),
        title="Average Neuron Importances",
        axis_title="Neurons",
    )
    mlflow.log_metrics(neuron_imp_dict)
    mlflow.log_text(str(avg_neuron_imp), "neuron_imp_summary.txt")
    # We can also look at the distribution of each neuron's attributions. Below we look
    # at the distributions for neurons 7 and 9, and we can confirm that their attribution
    # distributions are very close to 0, suggesting they are not learning substantial
    # features.
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9, 6))
    fig.tight_layout(pad=3)
    ax1.hist(cond_vals[:, 9], 100)
    ax1.set(title="Neuron 9 Distribution")
    ax2.hist(cond_vals[:, 7], 100)
    ax2.set(title="Neuron 7 Distribution")
    mlflow.log_figure(fig, "Neurons_Distribution.png")


def neuron_conductance(net, test_input_tensor, neuron_selector=None):
    """
    We have identified that some of the neurons are not learning important features,
    while others are. Can we now understand what each of these important neurons are
    looking at in the input? For instance, are they identifying different features in
    the input or similar ones?
    To answer these questions, we can apply the third type of attributions available
    in Captum, **Neuron Attributions**. This allows us to understand what parts of the
    input contribute to activating a particular input neuron. For this example, we will
    apply Neuron Conductance, which divides the neuron's total conductance value into
    the contribution from each individual input feature.
    To use Neuron Conductance, we create a NeuronConductance object, analogously to
    Conductance, passing in the model as well as the module (layer) whose output we
    would like to understand, in this case, net.sigmoid1, as before.
    """
    if neuron_selector is None:
        neuron_selector = 0
    neuron_cond = NeuronConductance(net, net.sigmoid1)
    # We can now obtain the neuron conductance values for all the test examples by
    # calling attribute on the NeuronConductance object. Neuron Conductance requires
    # the neuron index in the target layer for which attributions are requested as well
    # as the target index for networks with multiple outputs, similar to layer
    # conductance. As before, we provide target = 1, corresponding to survival, and
    # compute neuron conductance for neurons 0 and 10, the significant neurons
    # identified above. The neuron index can be provided either as a tuple or as just
    # an integer if the layer output is 1-dimensional.
    neuron_cond_vals = neuron_cond.attribute(
        test_input_tensor, neuron_selector=neuron_selector, target=1
    )
    neuron_cond, _ = visualize_importances(
        feature_names,
        neuron_cond_vals.mean(dim=0).detach().numpy(),
        title="Average Feature Importances for Neuron {}".format(neuron_selector),
    )
    mlflow.log_text(
        str(neuron_cond), "Avg_Feature_Importances_Neuron_" + str(neuron_selector) + ".txt"
    )


if __name__ == "__main__":
    parser = ArgumentParser(description="Titanic Captum Example")
    parser.add_argument(
        "--use_pretrained_model",
        default=False,
        metavar="N",
        help="Use pretrained model or train from scratch",
    )
    parser.add_argument(
        "--max_epochs",
        type=int,
        default=100,
        metavar="N",
        help="Number of epochs to be used for training",
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.1,
        metavar="LR",
        help="learning rate (default: 0.1)",
    )
    args = parser.parse_args()
    dict_args = vars(args)
    with mlflow.start_run(run_name="Titanic_Captum_mlflow"):
        net, train_features, train_labels, test_features, test_labels, feature_names = train()
        compute_accuracy(net, train_features, train_labels, title="Train Accuracy")
        test_input_tensor = compute_accuracy(net, test_features, test_labels, title="Test Accuracy")
        feature_conductance(net, test_input_tensor)
        layer_conductance(net, test_input_tensor)
        neuron_conductance(net, test_input_tensor)
        mlflow.log_param("Train Size", len(train_labels))
        mlflow.log_param("Test Size", len(test_labels))
mlflow/mlflow
examples/pytorch/CaptumExample/Titanic_Captum_Interpret.py
Python
apache-2.0
14,395
[ "NEURON" ]
f84aed658c4312dc7bbea97780fae48535405ca7cb105354225d1f2b3da3b0ff
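The feature_conductance docstring above describes how IntegratedGradients.attribute returns per-input attributions plus a convergence delta. A minimal sketch of that call pattern, using a made-up two-feature stand-in model and random inputs rather than the Titanic network:

import torch
import torch.nn as nn
from captum.attr import IntegratedGradients

model = nn.Sequential(nn.Linear(2, 4), nn.Sigmoid(), nn.Linear(4, 2), nn.Softmax(dim=1))
model.eval()

inputs = torch.randn(8, 2, requires_grad=True)  # 8 hypothetical samples
ig = IntegratedGradients(model)
# attributions match the input shape; delta estimates the integration error
attributions, delta = ig.attribute(inputs, target=1, return_convergence_delta=True)
print(attributions.shape, delta.shape)  # torch.Size([8, 2]) torch.Size([8])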
#!/usr/bin/env python # This script compares the two variants of the Tsodyks/Markram synapse in NEST. import cynest as nest import cynest.voltage_trace nest.ResetKernel() #Parameter set for depression dep_params={"U":0.67, "weight":250.} # parameter set for facilitation fac_params={"U":0.1, "tau_fac":1000.,"tau_rec":100.,"weight":250.} # Here we assign the parameter set to the synapse models t1_params=fac_params # for tsodyks_synapse t2_params=t1_params.copy() # for tsodyks2_synapse nest.SetDefaults("tsodyks_synapse",t1_params) nest.SetDefaults("tsodyks2_synapse",t2_params) nest.SetDefaults("iaf_psc_exp",{"tau_syn_ex": 3.}) neuron = nest.Create("iaf_psc_exp",3) nest.Connect([neuron[0]],[neuron[1]],model="tsodyks_synapse") nest.Connect([neuron[0]],[neuron[2]],model="tsodyks2_synapse") voltmeter = nest.Create("voltmeter",2) nest.SetStatus(voltmeter, {"withgid": True, "withtime": True}) nest.Connect([voltmeter[0]], [neuron[1]]) nest.Connect([voltmeter[1]], [neuron[2]]) nest.SetStatus([neuron[0]], "I_e", 376.0) nest.Simulate(500.0) nest.SetStatus([neuron[0]], "I_e", 0.0) nest.Simulate(800.0) nest.SetStatus([neuron[0]], "I_e", 376.0) nest.Simulate(500.0) nest.SetStatus([neuron[0]], "I_e", 0.0) nest.Simulate(100.0) nest.voltage_trace.from_device([voltmeter[0]]) nest.voltage_trace.from_device([voltmeter[1]]) nest.voltage_trace.show()
gewaltig/cython-neuron
cynest/examples/test_tsodyks2_synapse.py
Python
gpl-2.0
1,367
[ "NEURON" ]
45e33571f02fc9624cb3d6a3001350c067cf9ee95a7d90a521a1def65fccba55
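The depression and facilitation parameter sets above (U, tau_fac, tau_rec) feed the Tsodyks-Markram short-term plasticity model. As an intuition aid, the sketch below iterates one common discrete-time formulation of that model in plain NumPy; the exact order in which the u and x variables are updated around a spike is precisely where tsodyks_synapse and tsodyks2_synapse differ, so this is illustrative only, not NEST's internal update.

import numpy as np

def tm_psc_amplitudes(spike_times, U=0.1, tau_fac=1000.0, tau_rec=100.0, weight=250.0):
    """Relative PSC amplitude at each presynaptic spike (one common formulation)."""
    u, x = 0.0, 1.0
    amps, last_t = [], None
    for t in spike_times:
        dt = 0.0 if last_t is None else t - last_t
        u = u * np.exp(-dt / tau_fac)                 # facilitation decays between spikes
        u = u + U * (1.0 - u)                         # and jumps at the spike
        x = 1.0 + (x - 1.0) * np.exp(-dt / tau_rec)   # resources recover toward 1
        amps.append(weight * u * x)
        x = x * (1.0 - u)                             # a fraction u of resources is consumed
        last_t = t
    return amps

print(tm_psc_amplitudes(np.arange(0.0, 500.0, 50.0)))  # facilitating spike train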
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2020 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
HELANAL --- analysis of protein helices
=======================================

:Author: Lily Wang
:Year: 2020
:Copyright: GNU Public License v3

.. versionadded:: 2.0.0

This module contains code to analyse protein helices using the
HELANAL_ algorithm ([Bansal2000]_ , [Sugeta1967]_ ).

HELANAL_ quantifies the geometry of helices in proteins on the basis of their
Cα atoms. It can determine local structural features such as the local
helical twist and rise, virtual torsion angle, local helix origins and
bending angles between successive local helix axes.

.. _HELANAL: https://pubmed.ncbi.nlm.nih.gov/10798526/

.. [Sugeta1967] Sugeta, H. and Miyazawa, T. 1967. General method for
   calculating helical parameters of polymer chains from bond lengths, bond
   angles and internal rotation angles. *Biopolymers* 5 673 - 679

.. [Bansal2000] Bansal M, Kumar S, Velavan R. 2000. HELANAL - A program to
   characterise helix geometry in proteins. *J Biomol Struct Dyn.*
   17(5):811-819.

Example use
-----------

You can pass in a single selection::

    import MDAnalysis as mda
    from MDAnalysis.tests.datafiles import PSF, DCD
    from MDAnalysis.analysis import helix_analysis as hel

    u = mda.Universe(PSF, DCD)
    helanal = hel.HELANAL(u, select='name CA and resnum 161-187')
    helanal.run()

All computed properties are available in ``.results``::

    print(helanal.results.summary)

Alternatively, you can analyse several helices at once by passing in
multiple selection strings::

    helanal2 = hel.HELANAL(u, select=('name CA and resnum 100-160',
                                      'name CA and resnum 200-230'))

The :func:`helix_analysis` function will carry out helix analysis on atom
positions, treating each row of coordinates as an alpha-carbon equivalent::

    hel_xyz = hel.helix_analysis(u.atoms.positions, ref_axis=[0, 0, 1])

"""

import warnings
import numpy as np

import MDAnalysis as mda
from ..lib import util, mdamath
from .base import AnalysisBase


def vector_of_best_fit(coordinates):
    """Fit vector through the centered coordinates,
    pointing to the first coordinate (i.e. upside-down).

    Parameters
    ----------
    coordinates : :class:`numpy.ndarray` of shape (N, 3)

    Returns
    -------
    :class:`numpy.ndarray` of shape (3,)
        Vector of best fit.
    """
    centered = coordinates - coordinates.mean(axis=0)
    Mt_M = np.matmul(centered.T, centered)
    # use the public np.linalg.svd (np.linalg.linalg is a private alias)
    u, s, vh = np.linalg.svd(Mt_M)
    vector = vh[0]

    # does vector face first local helix origin?
angle = mdamath.angle(centered[0], vector) if angle > np.pi/2: vector *= -1 return vector def local_screw_angles(global_axis, ref_axis, helix_directions): """ Cylindrical azimuth angles between the local direction vectors, as projected onto the cross-section of the helix, from (-pi, pi]. The origin (angle=0) is set to the plane of global_axis and ref_axis. Parameters ---------- global_axis : :class:`numpy.ndarray` of shape (3,) Vector of best fit. Screw angles are calculated perpendicular to this axis. ref_axis : :class:`numpy.ndarray` of shape (3,) Reference length-wise axis. One of the reference vectors is orthogonal to this axis. helix_directions : :class:`numpy.ndarray` of shape (N, 3) array of vectors representing the local direction of each helix window. Returns ------- :class:`numpy.ndarray` of shape (N,) Array of screw angles. """ global_axis = np.asarray(global_axis) # normal to the plane of `ref_axis` & `global_axis` perp = np.cross(ref_axis, global_axis) if not np.any(perp): # zero when ref_axis, global_axis parallel # use random orthogonal vector new_ref = [[1, 0, 0], [0, 0, 1]] while not np.any(perp) and new_ref: perp = np.cross(new_ref.pop(), global_axis) # normal for angle to plane of perp and global_axis ortho = np.cross(-perp, global_axis) # project helix_directions onto global to remove contribution norm_global_sq = np.dot(global_axis, global_axis) mag_g = np.matmul(global_axis, helix_directions.T)/norm_global_sq # projection onto global_axis proj_g = mag_g.reshape(-1, 1) @ global_axis.reshape(1, -1) # projection onto plane w/o global_axis contribution proj_plane = helix_directions - proj_g # angles from projection to perp refs = np.array([perp, ortho]) # (2, 3) norms = _, ortho_norm = np.outer(mdamath.pnorm(refs), mdamath.pnorm(proj_plane)) cos = cos_perp, cos_ortho = np.matmul(refs, proj_plane.T)/norms to_perp, to_ortho = np.arccos(np.clip(cos, -1, 1)) # (2, n_vec) to_ortho[ortho_norm == 0] = 0 # ? to_ortho[cos_perp < 0] *= -1 to_ortho[to_ortho == -np.pi] = np.pi # leave 180 alone return np.rad2deg(to_ortho) def helix_analysis(positions, ref_axis=[0, 0, 1]): r""" Calculate helix properties from atomic coordinates. Each property is calculated from a sliding window of 4 atoms, from i to i+3. Any property whose name begins with 'local' is a property of a sliding window. Parameters ---------- positions : :class:`numpy.ndarray` of shape (N, 3) Atomic coordinates. ref_axis : array-like of length 3, optional The reference axis used to calculate the tilt of the vector of best fit, and the local screw angles. Returns ------- dict with the following keys: local_twists : array, shape (N-3,) local twist angle from atom i+1 to i+2 local_nres_per_turn : array, shape (N-3,) number of residues per turn, based on local_twist local_axes : array, shape (N-3, 3) the length-wise helix axis of the local window local_bends : array, shape (N-6,) the angles between local helix angles, 3 windows apart local_heights : array, shape (N-3,) the rise of each local helix local_helix_directions : array, shape (N-2, 3) the unit vector from each local origin to atom i+1 local_origins : array, shape (N-2, 3) the projected origin for each helix all_bends : array, shape (N-3, N-3) angles between each local axis global_axis : array, shape (3,) vector of best fit through origins, pointing at the first origin. 
local_screw_angles : array, shape (N-2,) cylindrical azimuth angle to plane of global_axis and ref_axis """ # ^ ^ # \ / bi # \ / # CA_i+2 <----- CA_i+1 # / \ / ^ # / r \ / \ # V / \ θ / \ # / \ / CA_i # v origin # CA_i+3 # # V: vectors # bi: approximate "bisectors" in plane of screen # Note: not real bisectors, as the vectors aren't normalised # θ: local_twists # origin: origins # local_axes: perpendicular to plane of screen. Orthogonal to "bisectors" vectors = positions[1:] - positions[:-1] # (n_res-1, 3) bisectors = vectors[:-1] - vectors[1:] # (n_res-2, 3) bimags = mdamath.pnorm(bisectors) # (n_res-2,) adjacent_mag = bimags[:-1] * bimags[1:] # (n_res-3,) # find angle between bisectors for twist and n_residue/turn cos_theta = mdamath.pdot(bisectors[:-1], bisectors[1:])/adjacent_mag cos_theta = np.clip(cos_theta, -1, 1) twists = np.arccos(cos_theta) # (n_res-3,) local_twists = np.rad2deg(twists) local_nres_per_turn = 2*np.pi / twists # find normal to bisectors for local axes cross_bi = np.cross(bisectors[:-1], bisectors[1:]) # (n_res-3, 3) local_axes = (cross_bi.T / mdamath.pnorm(cross_bi)).T # (n_res-3, 3) local_axes = np.nan_to_num(local_axes) zero_vectors = np.tile(np.any(local_axes, axis=1), (len(local_axes), 1)).T # find angles between axes for bends bend_theta = np.matmul(local_axes, local_axes.T) # (n_res-3, n_res-3) # set angles to 0 between zero-vectors bend_theta = np.where(zero_vectors+zero_vectors.T, # (n_res-3, n_res-3) bend_theta, 1) bend_matrix = np.rad2deg(np.arccos(np.clip(bend_theta, -1, 1))) # local bends are between axes 3 windows apart local_bends = np.diagonal(bend_matrix, offset=3) # (n_res-6,) # radius of local cylinder radii = (adjacent_mag**0.5) / (2*(1.0-cos_theta)) # (n_res-3,) # special case: angle b/w bisectors is 0 (should virtually never happen) # guesstimate radius = half bisector magnitude radii = np.where(cos_theta != 1, radii, (adjacent_mag**0.5)/2) # height of local cylinder heights = np.abs(mdamath.pdot(vectors[1:-1], local_axes)) # (n_res-3,) local_helix_directions = (bisectors.T/bimags).T # (n_res-2, 3) # get origins by subtracting radius from atom i+1 origins = positions[1:-1].copy() # (n_res-2, 3) origins[:-1] -= (radii*local_helix_directions[:-1].T).T # subtract radius from atom i+2 in last one origins[-1] -= radii[-1]*local_helix_directions[-1] helix_axes = vector_of_best_fit(origins) screw = local_screw_angles(helix_axes, np.asarray(ref_axis), local_helix_directions) results = {'local_twists': local_twists, 'local_nres_per_turn': local_nres_per_turn, 'local_axes': local_axes, 'local_bends': local_bends, 'local_heights': heights, 'local_helix_directions': local_helix_directions, 'local_origins': origins, 'all_bends': bend_matrix, 'global_axis': helix_axes, 'local_screw_angles': screw} return results class HELANAL(AnalysisBase): r""" Perform HELANAL helix analysis on your trajectory. Parameters ---------- universe : Universe or AtomGroup The Universe or AtomGroup to apply the analysis to. select : str or iterable of str, optional The selection string to create an atom selection that the HELANAL analysis is applied to. Note that HELANAL is designed to work on the alpha-carbon atoms of protein residues. If you pass in multiple selections, the selections will be analysed separately. ref_axis : array-like of length 3, optional The reference axis used to calculate the tilt of the vector of best fit, and the local screw angles. flatten_single_helix : bool, optional Whether to flatten results if only one selection is passed. 
split_residue_sequences : bool, optional
        Whether to split the selection into separate helices at gaps in the
        residue sequence.
    verbose : bool, optional
        Turn on more logging and debugging.

    Attributes
    ----------
    results.local_twists : array or list of arrays
        The local twist angle from atom i+1 to i+2.
        Each array has shape (n_frames, n_residues-3)
    results.local_nres_per_turn : array or list of arrays
        Number of residues per turn, based on local_twist.
        Each array has shape (n_frames, n_residues-3)
    results.local_axes : array or list of arrays
        The length-wise helix axis of the local window.
        Each array has shape (n_frames, n_residues-3, 3)
    results.local_heights : array or list of arrays
        The rise of each local helix.
        Each array has shape (n_frames, n_residues-3)
    results.local_helix_directions : array or list of arrays
        The unit vector from each local origin to atom i+1.
        Each array has shape (n_frames, n_residues-2, 3)
    results.local_origins : array or list of arrays
        The projected origin for each helix.
        Each array has shape (n_frames, n_residues-2, 3)
    results.local_screw_angles : array or list of arrays
        The local screw angle for each helix.
        Each array has shape (n_frames, n_residues-2)
    results.local_bends : array or list of arrays
        The angles between local helix axes, 3 windows apart.
        Each array has shape (n_frames, n_residues-6)
    results.all_bends : array or list of arrays
        The angles between local helix axes.
        Each array has shape (n_frames, n_residues-3, n_residues-3)
    results.global_axis : array or list of arrays
        The length-wise axis for the overall helix. This points at the first
        helix window in the helix, so it runs opposite to the direction of
        the residue numbers.
        Each array has shape (n_frames, 3)
    results.global_tilts : array or list of arrays
        The angle between the global axis and the reference axis.
        Each array has shape (n_frames,)
    results.summary : dict or list of dicts
        Summary of stats for each property: the mean, the sample standard
        deviation, and the mean absolute deviation.
    """

    # shapes of properties from each frame, relative to n_residues
    attr_shapes = {
        'local_twists': (-3,),
        'local_bends': (-6,),
        'local_heights': (-3,),
        'local_nres_per_turn': (-3,),
        'local_origins': (-2, 3),
        'local_axes': (-3, 3),
        'local_helix_directions': (-2, 3),
        'local_screw_angles': (-2,),
    }

    def __init__(self, universe, select='name CA', ref_axis=[0, 0, 1],
                 verbose=False, flatten_single_helix=True,
                 split_residue_sequences=True):
        super(HELANAL, self).__init__(universe.universe.trajectory,
                                      verbose=verbose)
        selections = util.asiterable(select)
        atomgroups = [universe.select_atoms(s) for s in selections]
        consecutive = []
        # check that residues are consecutive and long enough sequences
        for s, ag in zip(selections, atomgroups):
            groups = util.group_same_or_consecutive_integers(ag.resindices)
            counter = 0
            if len(groups) > 1:
                msg = 'Your selection {} has gaps in the residues.'.format(s)
                if split_residue_sequences:
                    msg += ' Splitting into {} helices.'.format(len(groups))
                else:
                    groups = [ag.resindices]
                warnings.warn(msg)

            for g in groups:
                ng = len(g)
                counter += ng
                if ng < 9:
                    warnings.warn('Fewer than 9 atoms found for helix in '
                                  'selection {} with these resindices: {}. '
                                  'This sequence will be skipped. HELANAL '
                                  'is designed to work on sequences of '
                                  '≥9 residues.'.format(s, g))
                    continue

                ids, counts = np.unique(g, return_counts=True)
                if np.any(counts > 1):
                    dup = ', '.join(map(str, ids[counts > 1]))
                    warnings.warn('Your selection {} includes multiple atoms '
                                  'for residues with these resindices: {}. '
'HELANAL is designed to work on one alpha-' 'carbon per residue.'.format(s, dup)) consecutive.append(ag[counter-ng:counter]) self.atomgroups = consecutive self.ref_axis = np.asarray(ref_axis) self._flatten = flatten_single_helix def _zeros_per_frame(self, dims, n_positions=0): """Create zero arrays where first 2 dims are n_frames, n_values""" first = dims[0] + n_positions npdims = (self.n_frames, first,) + dims[1:] # py27 workaround return np.zeros(npdims, dtype=np.float64) def _prepare(self): n_res = [len(ag) for ag in self.atomgroups] for key, dims in self.attr_shapes.items(): empty = [self._zeros_per_frame( dims, n_positions=n) for n in n_res] self.results[key] = empty self.results.global_axis = [self._zeros_per_frame((3,)) for n in n_res] self.results.all_bends = [self._zeros_per_frame((n-3, n-3)) for n in n_res] def _single_frame(self): _f = self._frame_index for i, ag in enumerate(self.atomgroups): results = helix_analysis(ag.positions, ref_axis=self.ref_axis) for key, value in results.items(): attr = self.results[key] attr[i][_f] = value def _conclude(self): # compute tilt of global axes self.results.global_tilts = tilts = [] norm_ref = (self.ref_axis**2).sum() ** 0.5 for axes in self.results.global_axis: cos = np.matmul(self.ref_axis, axes.T) / \ (mdamath.pnorm(axes)*norm_ref) cos = np.clip(cos, -1.0, 1.0) tilts.append(np.rad2deg(np.arccos(cos))) global_attrs = ['global_axis', 'global_tilts', 'all_bends'] attrnames = list(self.attr_shapes.keys()) + global_attrs # summarise self.results.summary = [] for i in range(len(self.atomgroups)): stats = {} for name in attrnames: attr = self.results[name] mean = attr[i].mean(axis=0) dev = np.abs(attr[i]-mean) stats[name] = {'mean': mean, 'sample_sd': attr[i].std(axis=0, ddof=1), 'abs_dev': dev.mean(axis=0)} self.results.summary.append(stats) # flatten? if len(self.atomgroups) == 1 and self._flatten: for name in attrnames + ['summary']: attr = self.results[name] self.results[name] = attr[0] def universe_from_origins(self): """ Create MDAnalysis Universe from the local origins. Returns ------- Universe or list of Universes """ try: origins = self.results.local_origins except AttributeError: raise ValueError('Call run() before universe_from_origins') if not isinstance(origins, list): origins = [origins] universe = [] for xyz in origins: n_res = xyz.shape[1] u = mda.Universe.empty(n_res, n_residues=n_res, atom_resindex=np.arange(n_res), trajectory=True).load_new(xyz) universe.append(u) if not isinstance(self.results.local_origins, list): universe = universe[0] return universe
MDAnalysis/mdanalysis
package/MDAnalysis/analysis/helix_analysis.py
Python
gpl-2.0
19,741
[ "MDAnalysis" ]
4aa90aed5abdc24be28ed59d196873a43e10012d7d0a33c1ed3e88722ff5b5f0
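vector_of_best_fit above reduces to taking the top singular vector of the centered coordinates and flipping it toward the first point. A self-contained sketch of the same fit on synthetic points (the noisy z-aligned data here is made up; operating on the coordinates directly is equivalent to the Mt_M route in the source):

import numpy as np

rng = np.random.default_rng(0)
points = np.c_[rng.normal(0.0, 0.05, 50),
               rng.normal(0.0, 0.05, 50),
               np.linspace(0.0, 5.0, 50)]  # points running roughly along +z

centered = points - points.mean(axis=0)
# first right-singular vector = direction of best fit through the points
_, _, vh = np.linalg.svd(centered, full_matrices=False)
axis = vh[0]
# flip the axis toward the first point, as helix_analysis does
if np.dot(centered[0], axis) < 0:
    axis *= -1
print(axis)  # ~[0, 0, -1]: the first point sits at the low-z end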
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys from optparse import OptionParser import bigdl.orca.data.pandas from bigdl.orca import init_orca_context, stop_orca_context def process_feature(df, awake_begin=6, awake_end=23): import pandas as pd df['datetime'] = pd.to_datetime(df['timestamp']) df['hours'] = df['datetime'].dt.hour df['awake'] = (((df['hours'] >= awake_begin) & (df['hours'] <= awake_end)) | (df['hours'] == 0)).astype(int) return df if __name__ == "__main__": parser = OptionParser() parser.add_option("-f", type=str, dest="file_path", help="The file path to be read") parser.add_option("--deploy-mode", type=str, dest="deployMode", default="local", help="deploy mode, local, spark-submit, yarn-client or yarn-cluster") (options, args) = parser.parse_args(sys.argv) sc = init_orca_context(cluster_mode=options.deployMode) # read data file_path = options.file_path data_shard = bigdl.orca.data.pandas.read_csv(file_path) data = data_shard.collect() # repartition data_shard = data_shard.repartition(2) # apply function on each element trans_data_shard = data_shard.transform_shard(process_feature) data2 = trans_data_shard.collect() stop_orca_context()
intel-analytics/BigDL
python/orca/example/data/spark_pandas.py
Python
apache-2.0
1,851
[ "ORCA" ]
9476231189a1dff01fc0892edb8049cae6349e89d3f9a9899653f512a7f5dd49
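Because transform_shard hands each shard to process_feature as an ordinary pandas DataFrame, the feature logic can be sanity-checked locally with no Spark context; the two timestamps below are made up:

import pandas as pd

df = pd.DataFrame({"timestamp": ["2020-01-01 03:00:00", "2020-01-01 12:00:00"]})
df["datetime"] = pd.to_datetime(df["timestamp"])
df["hours"] = df["datetime"].dt.hour
df["awake"] = (((df["hours"] >= 6) & (df["hours"] <= 23)) | (df["hours"] == 0)).astype(int)
print(df[["hours", "awake"]])  # hour 3 -> awake 0, hour 12 -> awake 1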
#
#
# Tutorial Yambo School. Lausanne, 24-28 April 2017
# Convergence GW on hexagonal BN
# Alejandro Molina-Sanchez & Henrique P. C. Miranda
#
#
from __future__ import print_function
import os
import sys
from yambopy import *
from qepy import *
from schedulerpy import *
import argparse

yambo = 'yambo'
p2y = 'p2y'
prefix = 'bn'
bash = Scheduler.factory

def create_save():
    #check if the nscf cycle is present
    if os.path.isdir('nscf/%s.save'%prefix):
        print('nscf calculation found!')
    else:
        print('nscf calculation not found!')
        exit()

    #check if the SAVE folder is present
    if not os.path.isdir('database'):
        print('preparing yambo database')
        shell = bash()
        shell.add_command('mkdir -p database')
        shell.add_command('cd nscf/%s.save; %s; %s'%(prefix, p2y, yambo))
        shell.add_command('mv SAVE ../../database/')
        shell.run()
        shell.clean()

def gw_convergence():
    #create the folder to run the calculation
    if not os.path.isdir('gw_conv'):
        shell = bash()
        shell.add_command('mkdir -p gw_conv')
        shell.add_command('cp -r database/SAVE gw_conv/')
        shell.run()
        shell.clean()

    y = YamboIn('%s -p p -g n -V all'%yambo, folder='gw_conv')
    k_f = y['QPkrange'][0][1]    # Read the last k-point in the uniform k-grid
    y['BndsRnXp'] = [[1,10],'']  # Screening. Number of bands
    y['NGsBlkXp'] = [0,'Ry']     # Cutoff Screening
    y['GbndRnge'] = [[1,10],'']  # Self-energy. Number of bands
    y['QPkrange'] = [ [k_f,k_f,4,5], '' ]
    conv = { 'EXXRLvcs': [[10,10,20,40,60,80,100],'Ry'],
             'NGsBlkXp': [[0,0,1,2,3], 'Ry'],
             'BndsRnXp': [[[1,10],[1,10],[1,15],[1,20],[1,30]],''],
             'GbndRnge': [[[1,10],[1,10],[1,15],[1,20],[1,30]],''] }

    def run(filename):
        """ Function to be called by the optimize function """
        folder = filename.split('.')[0]
        print(filename, folder)
        shell = bash()
        shell.add_command('cd gw_conv')
        shell.add_command('rm -f *.json %s/o-*'%folder) #cleanup
        shell.add_command('%s -F %s -J %s -C %s 2> %s.log'%(yambo,filename,folder,folder,folder))
        shell.run()
        shell.clean()

    y.optimize(conv, run=run, ref_run=False)

def plot_convergence():
    y = YamboIn('%s -d -g n -V all'%yambo, folder='gw_conv')
    k_f = y['QPkrange'][0][1]    # Read the last k-point in the uniform k-grid
    print(k_f)
    #pack the files in .json files
    pack_files_in_folder('gw_conv')
    print('Select the converged value for each variable')
    shell = bash()
    shell.add_command('yambopy analysegw -bc 5 -kc %s -bv 4 -kv %s gw_conv EXXRLvcs' % (k_f, k_f))
    shell.add_command('yambopy analysegw -bc 5 -kc %s -bv 4 -kv %s gw_conv NGsBlkXp' % (k_f, k_f))
    shell.add_command('yambopy analysegw -bc 5 -kc %s -bv 4 -kv %s gw_conv BndsRnXp' % (k_f, k_f))
    shell.add_command('yambopy analysegw -bc 5 -kc %s -bv 4 -kv %s gw_conv GbndRnge' % (k_f, k_f))
    shell.run()
    shell.clean()

def gw():
    #create the folder to run the calculation
    if not os.path.isdir('gw'):
        shell = bash()
        shell.add_command('mkdir -p gw')
        shell.add_command('cp -r database/SAVE gw/')
        shell.run()
        shell.clean()

    # GW calculation. PPA Screening. Newton method
    y = YamboIn('%s -p p -g n -V all'%yambo, folder='gw')
    y['EXXRLvcs'] = [80,'Ry']  # Self-energy. Exchange
    y['BndsRnXp'] = [1,25]     # Screening. Number of bands
    y['NGsBlkXp'] = [3,'Ry']   # Cutoff Screening
    y['GbndRnge'] = [1,25]     # Self-energy. Number of bands
    #read values from QPkrange
    values, units = y['QPkrange']
    kpoint_start, kpoint_end, band_start, band_end = values
    #set the values of QPkrange
    y['QPkrange'] = [kpoint_start, kpoint_end, 2, 6]
    y.write('gw/yambo_gw.in')

    print('calculating...')
    shell = bash()
    shell.add_command('cd gw')
    shell.add_command('rm -f *.json gw/o-*') #cleanup
    shell.add_command('%s -F yambo_gw.in -J gw -C gw' % yambo)
    shell.run()
    shell.clean()

def plot_gw():
    #pack the files in .json files
    pack_files_in_folder('gw')
    #plot the results using the yambo analyser
    ya = YamboAnalyser('gw')
    print('plot all qpoints')
    ya.plot_gw('qp')
    print('plot along a path')
    path = [[[0, 0, 0], '$\Gamma$'],
            [[0.5, 0, 0], 'M'],
            [[0.3333, 0.3333, 0.0], 'K'],
            [[0.0, 0.0, 0.0], '$\Gamma$']]
    ya.plot_gw_path('qp', path, cols=(lambda x: x[2]+x[3], 2))

def xi():
    #create the folder to run the calculation
    if not os.path.isdir('gw-xi'):
        shell = bash()
        shell.add_command('mkdir -p gw-xi')
        shell.add_command('cp -r database/SAVE gw-xi/')
        shell.run()
        shell.clean()

    print("Running COHSEX in folder 'gw-xi/coh'")
    cohsex = YamboIn('%s -p c -g n -V all'%yambo, folder='gw-xi')
    cohsex['EXXRLvcs'] = [80,'Ry']  # Self-energy. Exchange
    cohsex['BndsRnXs'] = [1,25]     # Screening. Number of bands
    cohsex['NGsBlkXs'] = [3,'Ry']   # Cutoff Screening
    cohsex['GbndRnge'] = [1,25]     # Self-energy. Number of bands
    cohsex['QPkrange'][0][2:] = [2,6]
    cohsex.write('gw-xi/yambo_cohsex.in')
    shell = bash()
    shell.add_command('cd gw-xi')
    shell.add_command('rm -f coh.json coh/o-coh*') #cleanup
    shell.add_command('%s -F yambo_cohsex.in -J coh -C coh' % yambo)
    shell.run()
    shell.clean()

    print("Running PPA in folder 'gw-xi/pp'")
    ppa = YamboIn('%s -p p -g n -V all'%yambo, folder='gw-xi')
    ppa['EXXRLvcs'] = [80,'Ry']  # Self-energy. Exchange
    ppa['BndsRnXp'] = [1,25]     # Screening. Number of bands
    ppa['NGsBlkXp'] = [3,'Ry']   # Cutoff Screening
    ppa['GbndRnge'] = [1,25]     # Self-energy. Number of bands
    ppa['QPkrange'][0][2:] = [2, 6]  # QP range. All BZ
    ppa.write('gw-xi/yambo_ppa.in')
    shell = bash()
    shell.add_command('cd gw-xi')
    shell.add_command('rm -f pp.json pp/o-pp*') #cleanup
    shell.add_command('%s -F yambo_ppa.in -J pp -C pp' % yambo)
    shell.run()
    shell.clean()

    print("Running Real Axis in folder 'gw-xi/ra'")
    ra = YamboIn('%s -d -g n -V all'%yambo, folder='gw-xi')
    ra['EXXRLvcs'] = [80,'Ry']  # Self-energy. Exchange
    ra['BndsRnXd'] = [1,25]     # Screening. Number of bands
    ra['NGsBlkXd'] = [3,'Ry']   # Cutoff Screening
    ra['GbndRnge'] = [1,25]     # Self-energy. Number of bands
    ra['QPkrange'][0][2:] = [2, 6]  # QP range. All BZ
    ra.write('gw-xi/yambo_ra.in')
    shell = bash()
    shell.add_command('cd gw-xi')
    shell.add_command('rm -f ra.json ra/o-ra*') #cleanup
    shell.add_command('%s -F yambo_ra.in -J ra -C ra' % yambo)
    shell.run()
    shell.clean()

def plot_xi():
    #pack the files in .json files
    pack_files_in_folder('gw-xi')
    ya = YamboAnalyser('gw-xi')
    print('Plot band structure for COHSEX, PPA and RA')
    path = [[[0, 0, 0], '$\Gamma$'],
            [[0.5, 0, 0], 'M'],
            [[0.3333, 0.3333, 0.0], 'K'],
            [[0.0, 0.0, 0.0], '$\Gamma$']]
    ya.plot_gw_path('qp', path, cols=(lambda x: x[2]+x[3],))

def dyson_eq():
    #create the folder to run the calculation
    folder_dyson = 'gw-zeros'
    if not os.path.isdir(folder_dyson):
        shell = bash()
        shell.add_command('mkdir -p %s' % folder_dyson)
        shell.add_command('cp -r database/SAVE %s/' % folder_dyson)
        shell.run()
        shell.clean()

    dyson = YamboIn('%s -p p -g n -V all'%yambo, folder=folder_dyson)
    dyson['EXXRLvcs'] = [80,'Ry']  # Self-energy. Exchange
    dyson['BndsRnXp'] = [1,25]     # Screening. Number of bands
    dyson['NGsBlkXp'] = [3,'Ry']   # Cutoff Screening
    dyson['GbndRnge'] = [1,25]     # Self-energy. Number of bands
    dyson['QPkrange'][0][2:] = [2, 6]
    dyson['DysSolver'] = "n"
    dyson.write('%s/yambo_newton.in' % folder_dyson)
    dyson['DysSolver'] = "s"
    dyson.write('%s/yambo_secant.in' % folder_dyson)

    shell = bash()
    print("calculating with the Newton and Secant solvers in folders 'gw-zeros/newton' and 'gw-zeros/secant'...")
    shell.add_command('cd %s' % folder_dyson)
    shell.add_command('rm -f *.json newton/o-* secant/o-*') #cleanup
    shell.add_command('%s -F yambo_newton.in -J newton -C newton' % yambo)
    shell.add_command('%s -F yambo_secant.in -J secant -C secant' % yambo)
    shell.run()
    shell.clean()

def plot_dyson():
    #pack the files in .json files
    pack_files_in_folder('gw-zeros')
    ya = YamboAnalyser('gw-zeros')
    print('plot kpoints for the Newton and Secant solvers')
    path = [[[0, 0, 0], '$\Gamma$'],
            [[0.5, 0, 0], 'M'],
            [[0.3333, 0.3333, 0.0], 'K'],
            [[0.0, 0.0, 0.0], '$\Gamma$']]
    ya.plot_gw_path('qp', path, cols=(lambda x: x[2]+x[3],))

if __name__ == "__main__":
    #parse options
    parser = argparse.ArgumentParser(description='GW convergence')
    parser.add_argument('-c', '--convergence', action="store_true", help='Run convergence calculations')
    parser.add_argument('-p', '--plot', action="store_true", help='Pack into json files and plot the convergence results')
    parser.add_argument('-g', '--gw', action="store_true", help='Run a single GW calculation')
    parser.add_argument('-r', '--results', action="store_true", help='Pack into json files and plot a single GW calculation')
    parser.add_argument('-x', '--xi', action="store_true", help='GW calculations for several approximations of the screening')
    parser.add_argument('-xp', '--xp', action="store_true", help='Plot GW results for COHSEX, PPA and RA')
    parser.add_argument('-z', '--zeros', action="store_true", help='GW calculations for the Newton and Secant solvers')
    parser.add_argument('-zp', '--zp', action="store_true", help='Plot GW results for the Newton and Secant solvers')
    args = parser.parse_args()
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    create_save()
    if args.convergence: gw_convergence()
    if args.plot:        plot_convergence()
    if args.gw:          gw()
    if args.results:     plot_gw()
    if args.xi:          xi()
    if args.xp:          plot_xi()
    if args.zeros:       dyson_eq()
    if args.zp:          plot_dyson()
palful/yambopy
tutorial/bn/gw_conv_bn.py
Python
bsd-3-clause
10,513
[ "Yambo" ]
e937655d83a0b095bdb9e91a0b34b75d50e34e17c3495b7f1ff965e61069baa9
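Judging from the duplicated first entries in the conv dictionary, YamboIn.optimize appears to run a one-parameter-at-a-time scan with the first value of each list as the shared reference; that reading of yambopy's internals is an assumption. A plain-Python sketch of the enumeration pattern (units stripped):

conv = {
    'EXXRLvcs': [10, 20, 40, 60, 80, 100],
    'NGsBlkXp': [0, 1, 2, 3],
}
reference = {key: values[0] for key, values in conv.items()}
runs = []
for key, values in conv.items():
    for value in values:
        params = dict(reference)
        params[key] = value  # vary one variable, hold the rest at reference
        runs.append(params)
for params in runs:
    print(params)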
from __future__ import print_function
import os
import time
import numpy as np
from copy import deepcopy as copy
from collections import OrderedDict

from .tensor import VOIGT, I9, inv, dot
from .environ import environ
from .tensor import array_rep, unrotate, rotate
from .material import Material
from .misc import is_stringlike, is_listlike
from .logio import logger, add_filehandler, splash
from .database import DatabaseFile, COMPONENT_SEP, groupby_names
from .stress_control import d_from_prescribed_stress, numerical_jacobian
import matmodlab2.core.linalg as la
import matmodlab2.core.deformation as dfm

continued = {'continued': 1}

__all__ = ['MaterialPointSimulator']


class MaterialPointSimulator(object):
    """The material point simulator

    The material point simulator exercises a material model just as a finite
    element solver would.

    Parameters
    ----------
    jobid : str
        A job identifier
    initial_temp : float [0.]
        The (optional) initial temperature
    db_fmt : str ['npz'], {'npz', 'exo'}
        The output database format. npz is the numpy compressed storage
        format and exo is the ExodusII format
    logfile : bool [False]
        Whether to write a log file
    write_db : bool [True]
        Whether to write the database file
    ufield : ndarray [None]
        The initial user defined field.

    Notes
    -----
    The steps to creating and running a simulation with the
    `MaterialPointSimulator` are:

    1. Instantiate a `MaterialPointSimulator` simulator object, giving it a
       string `jobid`
    2. Assign a material model to the simulator
    3. Add simulation (deformation) steps to the simulator

    The steps are run as they are added.

    Output is written to either a compressed numpy file or an ExodusII
    database file. ExodusII database files can be viewed using the open
    source `tsviewer` or `ParaView`.

    Examples
    --------
    In the following simulation, the `ElasticMaterial` is exercised through a
    step of uniaxial strain and then a stress controlled step to bring the
    material to a state of zero stress.

    >>> jobid = 'Job-1'
    >>> mps = MaterialPointSimulator(jobid)
    >>> material = ElasticMaterial(E=10, Nu=.1)
    >>> mps.assign_material(material)
    >>> mps.run_step('EEEEEE', [1., 0., 0., 0., 0., 0.])
    >>> mps.run_step('SSSEEE', [0., 0., 0., 0., 0., 0.])

    """
    valid_descriptors = ['DE', 'E', 'S', 'DS', 'U', 'F']

    def __init__(self, jobid, initial_temp=0., db_fmt='npz',
                 logfile=False, write_db=True, ufield=None):
        logger.info('Initializing the simulation')
        self.jobid = jobid

        # File I/O
        if logfile:
            add_filehandler(logger, self.jobid+'.log')
            splash(logger)
        if db_fmt not in ('npz', 'exo'):
            raise ValueError('db_fmt must be npz or exo')
        self.db_fmt = db_fmt
        logger.info('Matmodlab simulation: {0!r}'.format(self.jobid))

        # Create initial step
        self.initial_temp = initial_temp
        logger.info('Creating initial step... ', extra=continued)
        self.steps = self._initialize_steps(self.initial_temp, ufield)
        logger.info('done')

        # Set defaults
        self._df = None
        self._columns = None
        self._material = None
        self._elem_var_names = None
        self.db = None
        self.data = None
        self.num_ufield = len(self.steps[0].ufield)

        # Following attributes are only applicable if using add_step/run
        self._steps = []
        self.write_db = write_db
        self._initialized = False

        logger.info('Done initializing the simulation')

    def _initialize_steps(self, temp, ufield):
        """Create the initial step"""
        begin, end = 0., 0.
        components = np.zeros(6)
        descriptors = ['E'] * 6
        return [Step(0, 0, 1, descriptors, components, temp, 0, ufield)]

    def _format_descriptors_and_components(self, descriptors, components):
        """Validate the user given descriptors"""
        # Make sure components is an array. Whatever the length of the
        # components is the length of the final descriptors
        if not is_listlike(components):
            components = [components]
        # Specify 'float64' for type consistency (important for some functions).
        components = np.array(components, dtype=np.float64)

        if is_stringlike(descriptors):
            if len(descriptors) == 1:
                # Lazy typing...
                descriptors = descriptors * len(components)
        elif not is_listlike(descriptors):
            raise TypeError('descriptors must be list_like or string_like')
        descriptors = list(descriptors)

        if len(descriptors) != len(components):
            raise ValueError('components and descriptors must have same '
                             'number of entries')

        for (i, descriptor) in enumerate(descriptors):
            if descriptor.upper() not in self.valid_descriptors:
                raise ValueError('Invalid descriptor {0!r}'.format(descriptor))
            descriptors[i] = descriptor.upper()

        unique_descriptors = list(set(descriptors))
        if 'F' in unique_descriptors:
            if len(unique_descriptors) != 1:
                raise ValueError('Cannot mix F with other descriptors')
            elif len(descriptors) != 9:
                raise ValueError('Must specify all 9 components of F')
        elif 'U' in unique_descriptors:
            if len(unique_descriptors) != 1:
                raise ValueError('Cannot mix U with other descriptors')
            elif len(descriptors) != 3:
                raise ValueError('Must specify all 3 components of U')
        elif np.any(np.in1d(['E', 'S', 'DE', 'DS'], descriptors)):
            if len(descriptors) > 6:
                raise ValueError('At most 6 components of stress/strain '
                                 'can be prescribed')

        return descriptors, components

    def add_step(self, descriptors, components, increment=1., frames=1,
                 scale=1., kappa=0., temperature=0., time_whole=None,
                 ufield=None):
        self._steps.append((descriptors, components, increment, frames,
                            scale, kappa, temperature, time_whole, ufield))

    def run(self):
        """Run the simulation"""
        if not self._steps:
            logger.warning('No steps to run')
            return
        for step in self._steps:
            (descriptors, components, increment, frames, scale,
             kappa, temperature, time_whole, ufield) = step
            self.run_step(descriptors, components, increment=increment,
                          frames=frames, scale=scale, kappa=kappa,
                          temperature=temperature, time_whole=time_whole,
                          ufield=ufield)
        if self.write_db:
            self.dump()

    def run_step(self, descriptors, components, increment=1., frames=1,
                 scale=1., kappa=0., temperature=0., time_whole=None,
                 ufield=None):
        """Create a deformation step for the simulation

        Parameters
        ----------
        descriptors : string or listlike of string
            Descriptors for each component of deformation. Each `descriptor`
            in `descriptors` must be one of:

            - `E`: representing strain
            - `DE`: representing an increment in strain
            - `S`: representing stress
            - `DS`: representing an increment in stress
            - `F`: representing the deformation gradient
            - `U`: representing displacement
        components : listlike of floats
            The components of deformation. `components[i]` is interpreted as
            `descriptors[i]`. Thus, `len(components)` must equal
            `len(descriptors)`
        increment : float, optional
            The length of the step in time units, default is 1.
        frames : int, optional
            The number of discrete increments in the step, default is 1
        scale : float or listlike of float
            Scaling factor to be applied to components. If scale is listlike,
            it must have the same length as components.
        kappa : float
            The Seth-Hill parameter of generalized strain. Default is 0.
        temperature : float
            The temperature at the end of the step. Default is 0.
time_whole : float The whole time at the end of the step. Default is `None`. If defined, the `increment` argument is ignored. ufield : ndarray [None] The value of the user defined field at the end of the step. The ufield argument must have been also passed to the constructor. The interpolated value at the beginning of an increment and the increment in ufield are passed to material models as the `ufield` and `dufield` keyword arguments, respectively. Tensor Component Ordering ------------------------- Component ordering for components is: 1. Symmetric tensors: XX, YY, ZZ, XY, YZ, XZ 2. Unsymmetric tensors: XX, XY, XZ YX, YY, YZ ZX, ZY, ZZ 3. Vectors: X, Y, Z Examples -------- To create a step of uniaxial strain with magnitude .1: >>> obj.run_step('EEEEEE', [1., 0., 0., 0., 0., 0.], scale=.1) To create a step of uniaxial stress with magnitude 1e6: >>> obj.run_step('SSSEEE', [1., 0., 0., 0., 0., 0.], scale=1e6) Stress and strain (and their increments) can be mixed. To create a step of uniaxial stress by holding the lateral stress components at 0. and deforming along the axial direction: >>> obj.run_step('ESSEEE', [1., 0., 0., 0., 0., 0.], scale=.1) To create a step of uniaxial strain, controlled by deformation gradient: >>> obj.run_step('FFFFFFFFF', [1.05, 0., 0., 1., 0., 0.], scale=1e6) Note, all 9 components of the deformation gradient must be prescribed. Special deformation cases are volumetric strain and pressure. Each is defined by prescribing one, and only one, component of either strain or stress, respectively: Volumetric strain. >>> obj.run_step('E', .1) Pressure: >>> obj.run_step('S', 1, scale=1e6) Notes ----- Prescribed deformation gradient and displacement components are converted to strain. Internally, the driver works only with strain, stress, or their increments. For stress, strain (and their increments), or mixed steps, all components of deformation need not be prescribed (but `len(descriptors)` must be equal to `len(components)`). Any missing components are assumed to be 0 strain. Accordingly, the following two steps would be treated identically: >>> obj.run_step('ESSEEE', [1., 0., 0., 0., 0., 0.], scale=.1) >>> obj.run_step('ESS', [1., 0., 0.], scale=.1) Steps are run when they are added. """ if self.material is None: raise RuntimeError('Material must be assigned before adding steps') if not self._initialized: self.initialize_data() descriptors, components = self._format_descriptors_and_components( descriptors, components) # Stress control must have kappa = 0 if any([x in descriptors for x in ('S', 'DS')]) and abs(kappa) > 1e-12: raise ValueError('Stress control requires kappa = 0') istep = len(self.steps) previous_step = self.steps[-1] if ufield is not None: if previous_step.ufield is None: s = 'Invalid ufield specification on step {0}.\n' \ 'Was ufield passed to the MaterialPointSimulator\'s ' \ 'constructor?' 
raise ValueError(s.format(istep)) if not is_listlike(ufield): ufield = [ufield] ufield = np.asarray(ufield) if ufield.shape != previous_step.ufield.shape: s = 'Invalid ufield specification on step {0}.\n' \ 'ufield.shape is different than previous step' raise ValueError(s.format(istep)) if previous_step.ufield is not None and ufield is None: # Must define ufield for *all* steps ufield = np.asarray(previous_step.ufield) if not is_listlike(scale): # Scalar scale factor scale = np.ones(len(components)) * scale scale = np.asarray(scale) if len(scale) != len(components): raise ValueError('components and scale must have same length') # Apply scaling factors components = components * scale if 'F' in descriptors: pass # Convert deformation gradient to strain #components, rotation = dfm.strain_from_defgrad(components, kappa) #if np.max(np.abs(rotation - np.eye(3))) > 1e-8: # raise ValueError('QR decomposition of deformation gradient ' # 'gave unexpected rotations (rotations are ' # 'not yet supported)') #descriptors = ['E'] * 6 elif 'U' in descriptors: # Convert displacement to strain U = np.zeros((3, 3)) DI3 = np.diag_indices(3) U[DI3] = components + 1. components = dfm.strain_from_stretch(array_rep(U,(6,)), kappa) descriptors = ['E'] * 6 elif 'E' in descriptors and len(descriptors) == 1: # only one strain value given -> volumetric strain components = dfm.scalar_volume_strain_to_tensor(components[0], kappa) descriptors = ['E'] * 6 elif 'S' in descriptors and len(descriptors) == 1: # only one stress value given -> pressure Sij = -components[0] components = np.array([Sij, Sij, Sij, 0., 0., 0.]) descriptors = ['S'] * 6 elif 'DS' in descriptors and len(descriptors) == 1: # only one stress value given -> pressure ds = -components[0] components = np.array([ds, ds, ds, 0., 0., 0.]) descriptors = ['DS', 'DS', 'DS', 'E', 'E', 'E'] if np.any(np.in1d(['E', 'S', 'DE', 'DS'], descriptors)): # Stress/strain must have length == 6 if len(descriptors) != 6: n = 6 - len(descriptors) descriptors.extend(['E'] * n) components = np.append(components, [0.] * n) xc = '[{0}]'.format(', '.join(['{0:g}'.format(x) for x in components])) logger.debug('Adding step {0:4d} with descriptors: {1}\n' ' and components: {2}'.format( istep, ''.join(descriptors), xc)) begin = self.steps[-1].end if time_whole is not None: time_whole = float(time_whole) if time_whole < begin: i = len(self.steps)+1 raise ValueError('time_whole for step {0} ' '< beginning time'.format(i)) increment = time_whole - begin end = begin + increment step = Step(begin, end, frames, descriptors, components, temperature, kappa, ufield) # Add space for this step irow, icol = self.data.shape self.data = np.row_stack((self.data, np.zeros((frames, icol)))) # Now run the thing - adding enough rows to the data array for this step logger.info('\rRunning step {0}... '.format(istep), extra=continued) assert step.begin == previous_step.end self.run_istep(istep, step.begin, step.end, step.frames, step.descriptors, step.components, step.temp, step.ufield, step.kappa, self.J0, self.data[irow-1:, :]) logger.info('done') self.steps.append(step) self.ran = True return step @property def material(self): return self._material @material.setter def material(self, material): self.assign_material(material) def assign_material(self, material): """Assign the material model to the `MaterialPointSimulator` Parameters ---------- material : Material A material model Notes ----- `material` is assumed to be subclassed from the `Material` class. 
Accordingly, the following members are assumed to exist:

        - `material.name`: The name of the material. Default is `None`
        - `material.num_sdv`: Number of state dependent variables. Default
          is `None`
        - `material.sdv_names`: Names of state dependent variables (in order
          expected by model). Default is `None`. If `material.num_sdv` is
          not `None` and `material.sdv_names` is `None`, state dependent
          variables are given the names `SDV.1`, `SDV.2`, ...,
          `SDV.num_sdv`

        The following methods are assumed to exist:

        - `material.sdvini`: Initialize state dependent variables. All state
          dependent variables are assumed to have an initial value of 0. The
          method `sdvini` is used to change this initial value.
        - `material.eval`: The material state update. See the documentation
          for the `Material` base class for more information

        """
        if not hasattr(material, 'eval'):
            raise Exception('Material models must define the `eval` method')
        self.mat_is_Material_subclass = hasattr(material, 'base_eval')

        optional_attrs = ('name', 'num_sdv', 'sdv_names', 'sdvini')
        not_defined = []
        for attr in optional_attrs:
            try:
                getattr(material, attr)
            except AttributeError:
                not_defined.append(attr)

        name = getattr(material, 'name', None)
        logger.info('Assigning material {0!r}'.format(name))
        if not_defined:
            attrs = ', '.join(not_defined)
            logger.warning('Optional material members not defined: ' + attrs)

        self._material = material
        material.assigned = True

    def undo_step(self):
        """Undo the last step, resetting the state variables"""
        if len(self.steps) <= 1:
            raise Exception("Cannot undo initialization step")
        # drop the last step and every output row it produced
        num_kept_frames = sum(step.frames for step in self.steps[:-1])
        self.steps = self.steps[:-1]
        self.data = self.data[:num_kept_frames]

    def initialize_data(self):
        """When the material is assigned, initialize the database"""
        if self.material is None:
            raise RuntimeError('Material not assigned')

        # Setup the array of simulation data
        columns = list(self.columns.keys())
        num_vars = len(columns)
        num_incs = sum(step.frames for step in self.steps)
        self.data = np.zeros((num_incs, num_vars))

        # Put the initial state in the output database
        step = self.steps[0]
        statev = self.initialize_statev()
        strain = np.where(step.descriptors=='E', step.components, 0.)
        stress = np.where(step.descriptors=='S', step.components, 0.)
        defgrad = dfm.defgrad_from_strain(strain, step.kappa)
        glo_var_vals = [step.increment, 1, 0]
        elem_var_vals = self.astack(strain, np.zeros(6),
                                    stress, stress-np.zeros(6),
                                    defgrad, step.temp, step.ufield, statev)
        self.data[0, 0] = step.end
        self.data[0, 1:4] = glo_var_vals
        self.data[0, 4:] = elem_var_vals

        # Call the material with a zero state to get the initial Jacobian
        dtemp = 0.
        dufield = np.zeros_like(step.ufield)
        self.J0 = numerical_jacobian(self.eval, 1, 1, step.temp, dtemp,
                                     defgrad, defgrad, np.zeros(6),
                                     np.zeros(6), np.zeros(6), step.ufield,
                                     dufield, copy(statev), range(6))

        # This step is not actually run - it's just the initial state
        step.ran = True
        self._initialized = True

    @property
    def df(self):
        """Return the DataFrame containing simulation data"""
        from pandas import DataFrame
        if self._df is None or self._df.shape[0] < self.data.shape[0]:
            columns = list(self.columns.keys())
            self._df = DataFrame(self.data, columns=columns)
        return self._df

    def get_from_df(self, key):
        """Get `key` from the database

        Parameters
        ----------
        key : str
            key is the name of the variable to get from the database.
Returns ------- df : DataFrame A Pandas DataFrame containing the values for `key` for all times Notes ----- `key` can be either: - a single component like `F.XX`, in which case a `DataSeries` will be returned containing `F.XX` through all time of the simulation - a name like `F`, in which case a `DataFrame` will be returned containing all of the components of `F` through all time of the simulation """ if key in self.df: return self.df[key] keys = self.expand_name_to_keys(key, self.df.columns) return self.df[keys] def get_from_a(self, key): """Get the value of key from the data array""" if key in self.columns: return self.data[:, self.columns[key]] columns = list(self.columns.keys()) keys = self.expand_name_to_keys(key, columns) if keys is None: return None ix = [columns[key] for key in keys] return self.data[:, ix] def get(self, key, df=None): df = df or environ.notebook if df: return self.get_from_df(key) return self.get_from_a(key) def get2(self, *keys, **kwargs): df = kwargs.get('df', None) or environ.notebook if df: if is_listlike(keys): keys = list(keys) return self.df[keys] ix = [self.columns[key] for key in keys] return self.data[:, ix] def plot(self, *args, **kwargs): return self.df.plot(*args, **kwargs) def dump(self, filename=None): """Write results to output database""" logger.info('Opening the output database... ', extra=continued) if filename is None: filename = self.jobid + '.' + self.db_fmt root, ext = os.path.splitext(filename) if not ext: ext = '.'+self.db_fmt filename = filename + ext assert ext in ('.npz', '.exo') if ext == '.exo': self._write_exodb(filename) elif ext == '.npz': self._write_npzdb(filename) def _write_exodb(self, filename): """Write the results to a exodus database""" self.db = DatabaseFile(filename, 'w') logger.info('done') logger.info('Output database: {0!r}'.format(self.db.filename)) logger.info('Initializing the output database... ', extra=continued) self.db.initialize(self.glo_var_names, self.elem_var_names) logger.info('done') logger.info('Writing data to {0!r}'.format(self.db.filename)) num_glo_vars = len(self.glo_var_names) i, j = 1, 1 + num_glo_vars start = time.time() for row in self.data: end_time = row[0] glo_var_vals = row[i:j] elem_var_vals = row[j:] self.db.save(end_time, glo_var_vals, elem_var_vals) dt = time.time() - start logger.info('Done writing data {0:.2f}'.format(dt)) logger.info('Closing the output database... ', extra=continued) self.db.close() logger.info('done') def dumpz(self, filename=None): """Write results to output database""" if filename is None: filename = self.jobid if not filename.endswith('.npz'): filename += '.npz' self._write_npzdb(filename) def _write_npzdb(self, filename): logger.info('Writing data to {0!r}... 
'.format(filename), extra=continued) columns = list(self.columns.keys()) with open(filename, 'wb') as fh: np.savez(fh, columns=columns, data=self.data) logger.info('done') def expand_name_to_keys(self, key, columns): names_and_cols = groupby_names(columns) if key not in names_and_cols: return None sep = COMPONENT_SEP keys = ['{0}{1}{2}'.format(key, sep, x) for x in names_and_cols[key]] return keys @property def columns(self): if self._columns is not None: return self._columns columns = ['Time']+self.glo_var_names+self.elem_var_names self._columns = OrderedDict([(x,i) for (i,x) in enumerate(columns)]) return self._columns @property def glo_var_names(self): return ['DTime', 'Step', 'Frame'] @property def elem_var_names(self): """Returns the list of element variable names""" if self.material is None: raise ValueError('Material must first be assigned') if self._elem_var_names is not None: return self._elem_var_names def expand_var_name(name, components): sep = COMPONENT_SEP return ['{0}{1}{2}'.format(name, sep, x) for x in components] xc1 = ['X', 'Y', 'Z'] xc2 = ['XX', 'YY', 'ZZ', 'XY', 'YZ', 'XZ'] xc3 = ['XX', 'XY', 'XZ', 'YX', 'YY', 'YZ', 'ZX', 'ZY', 'ZZ'] elem_var_names = [] elem_var_names.extend(expand_var_name('E', xc2)) elem_var_names.extend(expand_var_name('DE', xc2)) elem_var_names.extend(expand_var_name('S', xc2)) elem_var_names.extend(expand_var_name('DS', xc2)) elem_var_names.extend(expand_var_name('F', xc3)) elem_var_names.append('Temp') # User defined field if self.num_ufield: ufield_names = expand_var_name('UFIELD', range(1, self.num_ufield+1)) elem_var_names.extend(ufield_names) # Material state variables num_sdv = getattr(self.material, 'num_sdv', Material.num_sdv) sdv_names = getattr(self.material, 'sdv_names', Material.sdv_names) if num_sdv: if sdv_names: assert len(sdv_names) == num_sdv else: sdv_names = expand_var_name('SDV', range(1, num_sdv+1)) elem_var_names.extend(sdv_names) # Names for material aux models if hasattr(self.material, 'aux_models'): for (name, aux_model) in self.material.aux_models.items(): elem_var_names.extend(aux_model.sdv_names) self._elem_var_names = elem_var_names return elem_var_names def initialize_statev(self): """Initialize the state dependent variables - including aux models""" numx = getattr(self.material, 'num_sdv', Material.num_sdv) statev = None if numx is None else np.zeros(numx) try: statev = self.material.sdvini(statev) except AttributeError: pass aux_sdv = [] if hasattr(self.material, 'aux_models'): for (name, aux_model) in self.material.aux_models.items(): xv = np.zeros(aux_model.num_sdv) aux_sdv.extend(aux_model.sdvini(xv)) if aux_sdv: if statev is not None: statev = np.append(statev, aux_sdv) else: statev = np.array(aux_sdv) return statev def astack(self, E, DE, S, DS, F, T, UF, XV): """Concatenates input arrays into a single flattened array""" a = [E, DE, S, DS, F, T] if len(UF): a.append(UF) if XV is not None: a.append(XV) return np.hstack(tuple([x for x in a if x is not None])) def run_istep(self, istep, begin, end, frames, descriptors, components, temp, ufield, kappa, J0, data): """Run this step, using the previous step as the initial state Parameters ---------- istep : int The step number to run """ if 'F' in descriptors: self._run_istep_F(istep, begin, end, frames, descriptors, components, temp, ufield, kappa, J0, data) else: self._run_istep(istep, begin, end, frames, descriptors, components, temp, ufield, kappa, J0, data) def _run_istep(self, istep, begin, end, frames, descriptors, components, temp, ufield, kappa, J0, data): 
assert istep != 0
        increment = end - begin
        #----------------------------------------------------------------------
        # The following variables have values at
        # [beginning, end, current] of step
        #
        # The deformation gradient has values at
        # [beginning of step, end of step, beginning of frame, current]
        #----------------------------------------------------------------------
        # Time
        time = np.array([begin, end, begin])

        # Strain and stress states
        start_strain = data[0, 4:10]
        start_stress = data[0, 16:22]
        end_strain = np.where(descriptors=='E', components, 0.)
        end_stress = np.where(descriptors=='S', components, 0.)
        strain = np.vstack((start_strain, end_strain, start_strain))
        stress = np.vstack((start_stress, end_stress, start_stress))

        # Temperature
        temp = np.array((data[0, 37], temp, data[0, 37]))
        dtemp = (temp[1] - temp[0]) / float(frames)

        # User defined field
        start = 38
        end = start + len(ufield)
        ufield = np.array((data[0, start:end], ufield, data[0, start:end]))
        dufield = (ufield[1] - ufield[0]) / float(frames)
        start = end

        #----------------------------------------------------------------------
        # The following variables have values at
        # [beginning, current] of step
        #----------------------------------------------------------------------
        start_statev = data[0, start:]
        if not len(start_statev):
            statev = [None, None]
        else:
            statev = np.vstack((start_statev, start_statev))

        start_defgrad = data[0, 28:37]
        F = np.vstack((start_defgrad, start_defgrad))

        dtime = 1. if increment < 1.e-14 else (time[1]-time[0])/float(frames)

        # v array is an array of integers that contains the rows and columns
        # of the slice needed in the jacobian subroutine.
        nv = 0
        v = np.zeros(6, dtype=int)
        for (i, cij) in enumerate(components):
            descriptor = descriptors[i]
            if descriptor == 'DE':    # -- strain rate
                strain[1, i] = strain[0, i] + cij * increment
            elif descriptor == 'E':   # -- strain
                strain[1, i] = cij
            elif descriptor == 'DS':  # -- stress rate
                stress[1, i] = stress[0, i] + cij * increment
                v[nv] = i
                nv += 1
            elif descriptor == 'S':   # -- stress
                stress[1, i] = cij
                v[nv] = i
                nv += 1
            else:
                raise ValueError('Invalid descriptor {0!r}'.format(descriptor))
        v = v[:nv]
        vx = [x for x in range(6) if x not in v]
        if increment < 1.e-14:
            dedt = np.zeros_like(strain[1])
        else:
            dedt = (strain[1] - strain[0]) / increment

        # --- find current value of d: sym(velocity gradient)
        if not nv:
            # strain or strain rate prescribed and the strain rate is
            # constant over the entire step
            if abs(kappa) > 1.e-14:
                d = dfm.rate_of_strain_to_rate_of_deformation(dedt, strain[2], kappa)
            elif environ.SQA:
                d = dfm.rate_of_strain_to_rate_of_deformation(dedt, strain[2], kappa)
                if not np.allclose(d, dedt):
                    logger.warn('SQA: d != dedt')
            else:
                d = np.array(dedt)
        else:
            # Initial guess for d[v]
            dedt[v] = 0.
Jsub = J0[[[x] for x in v], v] work = (stress[1,v] - stress[0,v]) / increment try: dedt[v] = la.solve(Jsub, work) except: dedt[v] -= la.lstsq(Jsub, work)[0] dedt[v] = dedt[v] / VOIGT[v] # Process each frame of the step for iframe in range(frames): a1 = float(frames - (iframe + 1)) / frames a2 = float(iframe + 1) / frames strain[2] = a1 * strain[0] + a2 * strain[1] pstress = a1 * stress[0] + a2 * stress[1] if nv: # One or more stresses prescribed d = d_from_prescribed_stress( self.eval, time[2], dtime, temp[2], dtemp, F[0], F[1], strain[2]*VOIGT, dedt*VOIGT, stress[2], ufield[2], dufield, statev[0], v, pstress[v]) d = d / VOIGT # compute the current deformation gradient and strain from # previous values and the deformation rate F[1], e = dfm.update_deformation(F[0], d, dtime, kappa) strain[2,v] = e[v] if environ.SQA and not np.allclose(strain[2,vx], e[vx]): logger.warn('SQA: bad strain on step {0}'.format(istep)) state = self.eval(kappa, time[2], dtime, temp[2], dtemp, F[0], F[1], strain[2]*VOIGT, d*VOIGT, np.array(stress[2]), ufield[2], dufield, statev[1]) s, x, ddsdde = state self.ddsdde = ddsdde F[0] = F[1] dstress = s - stress[2] time[2] = a1 * time[0] + a2 * time[1] temp[2] = a1 * temp[0] + a2 * temp[1] ufield[2] = a1 * ufield[0] + a2 * ufield[1] stress[2], statev[1] = s, x statev[0] = statev[1] glo_var_vals = [dtime, istep+1, iframe+1] elem_var_vals = self.astack(strain[2], dedt, stress[2], dstress, F[1], temp[2], ufield[2], x) data[iframe+1, 0] = time[2] data[iframe+1, 1:4] = glo_var_vals data[iframe+1, 4:] = elem_var_vals def _run_istep_F(self, istep, begin, end, frames, descriptors, components, temp, ufield, kappa, J0, data): assert istep != 0 increment = end - begin #---------------------------------------------------------------------- # # The following variables have values at # [begining, end, current] of step # # The deformation gradient has values at # [begining of step, end of step, beginning of frame, current] #---------------------------------------------------------------------- # # Time time = np.array([begin, end, begin]) # Strain and stress states start_strain = data[0, 4:10] start_stress = data[0, 16:22] start_defgrad = data[0, 28:37] # Will be computed from defgrad end_strain = np.zeros(6) end_stress = np.zeros(6) end_defgrad = components strain = np.vstack((start_strain, end_strain, start_strain)) stress = np.vstack((start_stress, end_stress, start_stress)) F = np.vstack((start_defgrad, end_defgrad, start_defgrad, I9)) # Temperature temp = np.array((data[0, 37], temp, data[0, 37])) dtemp = (temp[1] - temp[0]) / float(frames) # User defined field start = 38 end = start + len(ufield) ufield = np.array((data[0, start:end], ufield, data[0, start:end])) dufield = (ufield[1] - ufield[0]) / float(frames) start = end #---------------------------------------------------------------------- # # The following variables have values at # [begining, current] of step #---------------------------------------------------------------------- # start_statev = data[0, start:] if not len(start_statev): statev = [None, None] else: statev = np.vstack((start_statev, start_statev)) dtime = 1. 
if increment < 1.e-14 else (time[1]-time[0])/float(frames) # Process each frame of the step for iframe in range(frames): a1 = float(frames - (iframe + 1)) / frames a2 = float(iframe + 1) / frames F[3] = a1 * F[0] + a2 * F[1] _, R0 = dfm.strain_from_defgrad(F[2], kappa) strain[2], R = dfm.strain_from_defgrad(F[3], kappa) if dtime < 1.e-14: d = np.zeros(6) else: d = dfm.rate_of_defomation_from_defgrad(F[2], F[3], dtime) dedt = d # FIXME: this is for output only, should be fixed #d_b = unrotate(R0, d) #strain_b = unrotate(R0, strain[2]) #stress_b = unrotate(R0, stress[2]) state = self.eval(kappa, time[2], dtime, temp[2], dtemp, F[2], F[3], strain[2]*VOIGT, d*VOIGT, np.array(stress[2]), ufield[2], dufield, statev[1]) s, x, ddsdde = state self.ddsdde = ddsdde F[2] = F[3] dstress = s - stress[2] time[2] = a1 * time[0] + a2 * time[1] temp[2] = a1 * temp[0] + a2 * temp[1] ufield[2] = a1 * ufield[0] + a2 * ufield[1] statev[1] = x statev[0] = statev[1] stress[2] = s #stress[2] = rotate(R, s) glo_var_vals = [dtime, istep+1, iframe+1] elem_var_vals = self.astack(strain[2], dedt, stress[2], dstress, F[3], temp[2], ufield[2], x) data[iframe+1, 0] = time[2] data[iframe+1, 1:4] = glo_var_vals data[iframe+1, 4:] = elem_var_vals def eval(self, kappa, time, dtime, temp, dtemp, F0, F, strain, d, stress, ufield, dufield, statev, **kwds): """Wrapper method to material.eval. This is called by Matmodlab so that aux models can first be evaluated. See documentation for eval. """ if self.mat_is_Material_subclass: return self.material.base_eval(kappa, time, dtime, temp, dtemp, F0, F, strain, d, stress, ufield, dufield, statev, self.initial_temp, **kwds) else: # Not a subclass of the Material class, call its eval method return self.material.eval(time, dtime, temp, dtemp, F0, F, strain, d, stress, statev, ufield=ufield, dufield=dufield, **kwds) class Step(object): def __init__(self, begin, end, frames, descriptors, components, temp, kappa, ufield=None): assert len(components) == len(descriptors) # assert len(descriptors) == 6 self.begin = float(begin) self.end = float(end) self.increment = self.end - self.begin if abs(self.increment) > 0.: assert end > begin self.frames = frames self.components = np.asarray(components) self.descriptors = np.asarray(descriptors) self.temp = temp self.kappa = kappa self.ran = False if ufield is not None: if not is_listlike(ufield): ufield = [ufield] else: ufield = [] ufield = np.asarray(ufield) if len(ufield.shape) != 1: raise ValueError('ufield must be a 1D array') self.ufield = ufield
matmodlab/matmodlab2
matmodlab2/core/matmodlab.py
Python
bsd-3-clause
40,637
[ "ParaView" ]
a7faafc196e2952bbb8b54b7f33bed72bbac1f976fa3a6e876659c52eb895f9a
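The _run_istep driver in the matmodlab entry above handles mixed control by collecting the Voigt indices v whose stress (or stress rate) is prescribed and solving the corresponding sub-block of the initial tangent J0 for the unknown strain rate. The sketch below restates that solve on its own; the helper name, the use of np.ix_, and the plain least-squares fallback are illustrative choices of mine, not matmodlab's API.

import numpy as np

def strain_rate_for_prescribed_stress(J0, stress_start, stress_end, increment, v):
    # J0: (6, 6) material tangent evaluated at the start of the step
    # v:  Voigt indices whose stress (or stress rate) is prescribed
    Jsub = J0[np.ix_(v, v)]                           # sub-block of the tangent
    rate = (stress_end[v] - stress_start[v]) / increment
    try:
        dedt_v = np.linalg.solve(Jsub, rate)          # direct solve when the block is regular
    except np.linalg.LinAlgError:
        dedt_v = np.linalg.lstsq(Jsub, rate, rcond=None)[0]  # least-squares fallback
    return dedt_v

The remaining components keep the strain-driven rate (strain_end - strain_start) / increment, exactly as in the entry above.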
# -*- coding: utf-8 -*- """ This file contains the Qudi Logic to control Nuclear Operations. Qudi is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Qudi is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Qudi. If not, see <http://www.gnu.org/licenses/>. Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/> """ import datetime import numpy as np import time from collections import OrderedDict from core.module import Connector, StatusVar from core.util.mutex import Mutex from logic.generic_logic import GenericLogic from qtpy import QtCore class NuclearOperationsLogic(GenericLogic): """ A higher order logic, which combines several lower class logic modules in order to perform measurements and manipulations of nuclear spins. DISCLAIMER: =========== This module has two major issues: - a lack of proper documentation of all the methods - usage of tasks is not implemented and therefore direct connection to all the modules is used (I tried to compress as good as possible all the part, where access to other modules occurs so that a later replacement would be easier and one does not have to search throughout the whole file.) The state of this module is considered to be UNSTABLE. I am currently working on that and will from time to time improve the status of this module. So if you want to use it, be aware that there might appear drastic changes. --- Alexander Stark """ _modclass = 'NuclearOperationsLogic' _modtype = 'logic' # declare connectors #TODO: Use rather the task runner instead directly the module! 
sequencegenerationlogic = Connector(interface='SequenceGenerationLogic') traceanalysislogic = Connector(interface='TraceAnalysisLogic') gatedcounterlogic = Connector(interface='CounterLogic') odmrlogic = Connector(interface='ODMRLogic') optimizerlogic = Connector(interface='OptimizerLogic') scannerlogic = Connector(interface='ScannerLogic') savelogic = Connector(interface='SaveLogic') # status vars electron_rabi_periode = StatusVar('electron_rabi_periode', 1800e-9) # in s # pulser microwave: pulser_mw_freq = StatusVar('pulser_mw_freq', 200e6) # in Hz pulser_mw_amp = StatusVar('pulser_mw_amp', 2.25) # in V pulser_mw_ch = StatusVar('pulser_mw_ch', -1) # pulser rf: nuclear_rabi_period0 = StatusVar('nuclear_rabi_period0', 30e-6) # in s pulser_rf_freq0 = StatusVar('pulser_rf_freq0', 6.32e6) # in Hz pulser_rf_amp0 = StatusVar('pulser_rf_amp0', 0.1) nuclear_rabi_period1 = StatusVar('nuclear_rabi_period1', 30e-6) # in s pulser_rf_freq1 = StatusVar('pulser_rf_freq1', 3.24e6) # in Hz pulser_rf_amp1 = StatusVar('pulser_rf_amp1', 0.1) pulser_rf_ch = StatusVar('pulser_rf_ch', -2) # laser options: pulser_laser_length = StatusVar('pulser_laser_length', 3e-6) # in s pulser_laser_amp = StatusVar('pulser_laser_amp', 1) # in V pulser_laser_ch = StatusVar('pulser_laser_ch', 1) num_singleshot_readout = StatusVar('num_singleshot_readout', 3000) pulser_idle_time = StatusVar('pulser_idle_time', 1.5e-6) # in s # detection gated counter: pulser_detect_ch = StatusVar('pulser_detect_ch', 1) # measurement parameters: current_meas_asset_name = StatusVar('current_meas_asset_name', '') x_axis_start = StatusVar('x_axis_start', 1e-3) # in s x_axis_step = StatusVar('x_axis_step', 10e-3) # in s x_axis_num_points = StatusVar('x_axis_num_points', 50) # How often the measurement should be repeated. num_of_meas_runs = StatusVar('num_of_meas_runs', 1) # parameters for confocal and odmr optimization: optimize_period_odmr = StatusVar('optimize_period_odmr', 200) optimize_period_confocal = StatusVar('optimize_period_confocal', 300) # in s odmr_meas_freq0 = StatusVar('odmr_meas_freq0', 10000e6) # in Hz odmr_meas_freq1 = StatusVar('odmr_meas_freq1', 10002.1e6) # in Hz odmr_meas_freq2 = StatusVar('odmr_meas_freq2', 10004.2e6) # in Hz odmr_meas_runtime = StatusVar('odmr_meas_runtime', 30) # in s odmr_meas_freq_range = StatusVar('odmr_meas_freq_range', 30e6) # in Hz odmr_meas_step = StatusVar('odmr_meas_step', 0.15e6) # in Hz odmr_meas_power = StatusVar('odmr_meas_power', -30) # in dBm # Microwave measurment parameters: mw_cw_freq = StatusVar('mw_cw_freq', 10e9) # in Hz mw_cw_power = StatusVar('mw_cw_power', -30) # in dBm # on which odmr peak the manipulation is going to be applied: mw_on_odmr_peak = StatusVar('mw_on_odmr_peak', 1) # Gated counter: gc_number_of_samples = StatusVar('gc_number_of_samples', 3000) # in counts gc_samples_per_readout = StatusVar('gc_samples_per_readout', 10) # in counts # signals sigNextMeasPoint = QtCore.Signal() sigCurrMeasPointUpdated = QtCore.Signal() sigMeasurementStopped = QtCore.Signal() sigMeasStarted = QtCore.Signal() def __init__(self, config, **kwargs): super().__init__(config=config, **kwargs) self.log.debug('The following configuration was found.') # checking for the right configuration for key in config.keys(): self.log.info('{0}: {1}'.format(key,config[key])) self.threadlock = Mutex() def on_activate(self): """ Initialisation performed during activation of the module. 
""" # establish the access to all connectors: self._save_logic = self.get_connector('savelogic') #FIXME: THAT IS JUST A TEMPORARY SOLUTION! Implement the access on the # needed methods via the TaskRunner! self._seq_gen_logic = self.get_connector('sequencegenerationlogic') self._trace_ana_logic = self.get_connector('traceanalysislogic') self._gc_logic = self.get_connector('gatedcounterlogic') self._odmr_logic = self.get_connector('odmrlogic') self._optimizer_logic = self.get_connector('optimizerlogic') self._confocal_logic = self.get_connector('scannerlogic') # current measurement information: self.current_meas_point = self.x_axis_start self.current_meas_index = 0 self.num_of_current_meas_runs = 0 self.elapsed_time = 0 self.start_time = datetime.datetime.now() self.next_optimize_time = self.start_time # store here all the measured odmr peaks self.measured_odmr_list = [] self._optimize_now = False self._stop_requested = False # store here all the measured odmr peaks self.measured_odmr_list = [] # Perform initialization routines: self.initialize_x_axis() self.initialize_y_axis() self.initialize_meas_param() # connect signals: self.sigNextMeasPoint.connect(self._meas_point_loop, QtCore.Qt.QueuedConnection) def on_deactivate(self): """ Deactivate the module properly. """ return def initialize_x_axis(self): """ Initialize the x axis. """ stop = self.x_axis_start + self.x_axis_step * self.x_axis_num_points self.x_axis_list = np.arange( self.x_axis_start, stop + (self.x_axis_step / 2), self.x_axis_step) self.current_meas_point = self.x_axis_start def initialize_y_axis(self): """ Initialize the y axis. """ self.y_axis_list = np.zeros(self.x_axis_list.shape) # y axis where current data are stored self.y_axis_fit_list = np.zeros(self.x_axis_list.shape) # y axis where fit is stored. # here all consequutive measurements are saved, where the # self.num_of_meas_runs determines the measurement index for the row. self.y_axis_matrix = np.zeros((1, len(self.x_axis_list))) # here all the measurement parameters per measurement point are stored: self.parameter_matrix = np.zeros((1, len(self.x_axis_list)), dtype=object) def initialize_meas_param(self): """ Initialize the measurement param containter. """ # here all measurement parameters will be included for any kind of # nuclear measurement. self._meas_param = OrderedDict() def start_nuclear_meas(self, continue_meas=False): """ Start the nuclear operation measurement. """ self._stop_requested = False if not continue_meas: # prepare here everything for a measurement and go to the measurement # loop. self.prepare_measurement_protocols(self.current_meas_asset_name) self.initialize_x_axis() self.initialize_y_axis() self.current_meas_index = 0 self.sigCurrMeasPointUpdated.emit() self.num_of_current_meas_runs = 0 self.measured_odmr_list = [] self.elapsed_time = 0 self.start_time = datetime.datetime.now() self.next_optimize_time = 0 # load the measurement sequence: self._load_measurement_seq(self.current_meas_asset_name) self._pulser_on() self.set_mw_on_odmr_freq(self.mw_cw_freq, self.mw_cw_power) self.mw_on() self.lock() self.sigMeasStarted.emit() self.sigNextMeasPoint.emit() def _meas_point_loop(self): """ Run this loop continuously until the an abort criterium is reached. 
""" if self._stop_requested: with self.threadlock: # end measurement and switch all devices off self.stopRequested = False self.unlock() self.mw_off() self._pulser_off() # emit all needed signals for the update: self.sigCurrMeasPointUpdated.emit() self.sigMeasurementStopped.emit() return # if self._optimize_now: self.elapsed_time = (datetime.datetime.now() - self.start_time).total_seconds() if self.next_optimize_time < self.elapsed_time: current_meas_asset = self.current_meas_asset_name self.mw_off() # perform optimize position: self._load_laser_on() self._pulser_on() self.do_optimize_pos() # perform odmr measurement: self._load_pulsed_odmr() self._pulser_on() self.do_optimize_odmr_freq() # use the new measured frequencies for the microwave: if self.mw_on_odmr_peak == 1: self.mw_cw_freq = self.odmr_meas_freq0 elif self.mw_on_odmr_peak == 2: self.mw_cw_freq = self.odmr_meas_freq1 elif self.mw_on_odmr_peak == 3: self.mw_cw_freq = self.odmr_meas_freq2 else: self.log.error('The maximum number of odmr can only be 3, ' 'therfore only the peaks with number 0, 1 or 2 can ' 'be selected but an number of "{0}" was set. ' 'Measurement stopped!'.format(self.mw_on_odmr_peak)) self.stop_nuclear_meas() self.sigNextMeasPoint.emit() return self.set_mw_on_odmr_freq(self.mw_cw_freq, self.mw_cw_power) # establish the previous measurement conditions self.mw_on() self._load_measurement_seq(current_meas_asset) self._pulser_on() self.elapsed_time = (datetime.datetime.now() - self.start_time).total_seconds() self.next_optimize_time = self.elapsed_time + self.optimize_period_odmr # if stop request was done already here, do not perform the current # measurement but jump to the switch off procedure at the top of this # method. if self._stop_requested: self.sigNextMeasPoint.emit() return # this routine will return a desired measurement value and the # measurement parameters, which belong to it. curr_meas_points, meas_param = self._get_meas_point(self.current_meas_asset_name) # this routine will handle the saving and storing of the measurement # results: self._set_meas_point(num_of_meas_runs=self.num_of_current_meas_runs, meas_index=self.current_meas_index, meas_points=curr_meas_points, meas_param=meas_param) if self._stop_requested: self.sigNextMeasPoint.emit() return # increment the measurement index or set it back to zero if it exceed # the maximal number of x axis measurement points. The measurement index # will be used for the next measurement if self.current_meas_index + 1 >= len(self.x_axis_list): self.current_meas_index = 0 # If the next measurement run begins, add a new matrix line to the # self.y_axis_matrix self.num_of_current_meas_runs += 1 new_row = np.zeros(len(self.x_axis_list)) # that vertical stack command behaves similar to the append method # in python lists, where the new_row will be appended to the matrix: self.y_axis_matrix = np.vstack((self.y_axis_matrix, new_row)) self.parameter_matrix = np.vstack((self.parameter_matrix, new_row)) else: self.current_meas_index += 1 # check if measurement is at the end, and if not, adjust the measurement # sequence to the next measurement point. 
if self.num_of_current_meas_runs < self.num_of_meas_runs: # take the next measurement index from the x axis as the current # measurement point: self.current_meas_point = self.x_axis_list[self.current_meas_index] # adjust the measurement protocol with the new current_meas_point self.adjust_measurement(self.current_meas_asset_name) self._load_measurement_seq(self.current_meas_asset_name) else: self.stop_nuclear_meas() self.sigNextMeasPoint.emit() def _set_meas_point(self, num_of_meas_runs, meas_index, meas_points, meas_param): """ Handle the proper setting of the current meas_point and store all the additional measurement parameter. @param int meas_index: @param int num_of_meas_runs @param float meas_points: @param meas_param: @return: """ # one matrix contains all the measured values, the other one contains # all the parameters for the specified measurement point: self.y_axis_matrix[num_of_meas_runs, meas_index] = meas_points self.parameter_matrix[num_of_meas_runs, meas_index] = meas_param # the y_axis_list contains the summed and averaged values for each # measurement index: self.y_axis_list[meas_index] = self.y_axis_matrix[:, meas_index].mean() self.sigCurrMeasPointUpdated.emit() def _get_meas_point(self, meas_type): """ Start the actual measurement (most probably with the gated counter) And perform the measurement with that routine. @return tuple (float, dict): """ # save also the count trace of the gated counter after the measurement. # here the actual measurement is going to be started and stoped and # then analyzed and outputted in a proper format. # Check whether proper mode is active and if not activated that: if self._gc_logic.get_counting_mode() != 'finite-gated': self._gc_logic.set_counting_mode(mode='finite-gated') self._gc_logic.set_count_length(self.gc_number_of_samples) self._gc_logic.set_counting_samples(self.gc_samples_per_readout) self._gc_logic.startCount() time.sleep(2) # wait until the gated counter is done or available to start: while self._gc_logic.getState() != 'idle' and not self._stop_requested: # print('in SSR measure') time.sleep(1) # for safety reasons, stop also the counter if it is still running: # self._gc_logic.stopCount() name_tag = '{0}_{1}'.format(self.current_meas_asset_name, self.current_meas_point) self._gc_logic.save_current_count_trace(name_tag=name_tag) if meas_type in ['Nuclear_Rabi', 'Nuclear_Frequency_Scan']: entry_indices = np.where(self._gc_logic.countdata>50) trunc_countdata = self._gc_logic.countdata[entry_indices] flip_prop, param = self._trace_ana_logic.analyze_flip_prob(trunc_countdata) elif meas_type in ['QSD_-_Artificial_Drive', 'QSD_-_SWAP_FID', 'QSD_-_Entanglement_FID']: # do something measurement specific pass return flip_prop, param def stop_nuclear_meas(self): """ Stop the Nuclear Operation Measurement. @return int: error code (0:OK, -1:error) """ with self.threadlock: if self.getState() == 'locked': self._stop_requested = True return 0 def get_fit_functions(self): """ Returns all fit methods, which are currently implemented for that module. @return list: with string entries denoting the names of the fit. """ return ['No Fit', 'pos. Lorentzian', 'neg. Lorentzian', 'pos. Gaussian'] def do_fit(self, fit_function=None): """ Performs the chosen fit on the measured data. @param string fit_function: name of the chosen fit function @return dict: a dictionary with the relevant fit parameters, i.e. the result of the fit """ #TODO: implement the fit. 
pass def get_meas_type_list(self): return ['Nuclear_Rabi', 'Nuclear_Frequency_Scan', 'QSD_-_Artificial_Drive', 'QSD_-_SWAP_FID', 'QSD_-_Entanglement_FID'] def get_available_odmr_peaks(self): """ Retrieve the information on which odmr peak the microwave can be applied. @return list: with string entries denoting the peak number """ return [1, 2, 3] def prepare_measurement_protocols(self, meas_type): """ Prepare and create all measurement protocols for the specified measurement type @param str meas_type: a measurement type from the list get_meas_type_list """ self._create_laser_on() self._create_pulsed_odmr() #FIXME: Move this creation routine to the tasks! if meas_type == 'Nuclear_Rabi': # generate: self._seq_gen_logic.generate_nuclear_meas_seq(name=meas_type, rf_length_ns=self.current_meas_point*1e9, rf_freq_MHz=self.pulser_rf_freq0/1e6, rf_amp_V=self.pulser_rf_amp0, rf_channel=self.pulser_rf_ch, mw_freq_MHz=self.pulser_mw_freq/1e6, mw_amp_V=self.pulser_mw_amp, mw_rabi_period_ns=self.electron_rabi_periode*1e9, mw_channel=self.pulser_mw_ch, laser_time_ns=self.pulser_laser_length*1e9, laser_channel=self.pulser_laser_ch, laser_amp_V=self.pulser_laser_amp, detect_channel=self.pulser_detect_ch, wait_time_ns=self.pulser_idle_time*1e9, num_singleshot_readout=self.num_singleshot_readout) # sample: self._seq_gen_logic.sample_pulse_sequence(sequence_name=meas_type, write_to_file=True, chunkwise=False) # upload: self._seq_gen_logic.upload_sequence(seq_name=meas_type) elif meas_type == 'Nuclear_Frequency_Scan': # generate: self._seq_gen_logic.generate_nuclear_meas_seq(name=meas_type, rf_length_ns=(self.nuclear_rabi_period0*1e9)/2, rf_freq_MHz=self.current_meas_point/1e6, rf_amp_V=self.pulser_rf_amp0, rf_channel=self.pulser_rf_ch, mw_freq_MHz=self.pulser_mw_freq/1e6, mw_amp_V=self.pulser_mw_amp, mw_rabi_period_ns=self.electron_rabi_periode*1e9, mw_channel=self.pulser_mw_ch, laser_time_ns=self.pulser_laser_length*1e9, laser_channel=self.pulser_laser_ch, laser_amp_V=self.pulser_laser_amp, detect_channel=self.pulser_detect_ch, wait_time_ns=self.pulser_idle_time*1e9, num_singleshot_readout=self.num_singleshot_readout) # sample: self._seq_gen_logic.sample_pulse_sequence(sequence_name=meas_type, write_to_file=True, chunkwise=False) # upload: self._seq_gen_logic.upload_sequence(seq_name=meas_type) elif meas_type == 'QSD_-_Artificial_Drive': pass elif meas_type == 'QSD_-_SWAP_FID': pass elif meas_type == 'QSD_-_Entanglement_FID': pass def adjust_measurement(self, meas_type): """ Adjust the measurement sequence for the next measurement point. @param meas_type: @return: """ if meas_type == 'Nuclear_Rabi': # only the rf asset has to be regenerated since that is the only # thing what has changed. 
# You just have to ensure that the RF pulse in the sequence # Nuclear_Rabi is called exactly like this RF pulse: # generate the new pulse (which will overwrite the Ensemble) self._seq_gen_logic.generate_rf_pulse_ens(name='RF_pulse', rf_length_ns=(self.current_meas_point*1e9)/2, rf_freq_MHz=self.pulser_rf_freq0/1e6, rf_amp_V=self.pulser_rf_amp0, rf_channel=self.pulser_rf_ch) # sample the ensemble (and maybe save it to file, which will # overwrite the old one): self._seq_gen_logic.sample_pulse_block_ensemble(ensemble_name='RF_pulse', write_to_file=True, chunkwise=False) # upload the new sampled file to the device: self._seq_gen_logic.upload_asset(asset_name='RF_pulse') elif meas_type == 'Nuclear_Frequency_Scan': # generate the new pulse (which will overwrite the Ensemble) self._seq_gen_logic.generate_rf_pulse_ens(name='RF_pulse', rf_length_ns=(self.nuclear_rabi_period0*1e9)/2, rf_freq_MHz=self.current_meas_point/1e6, rf_amp_V=self.pulser_rf_amp0, rf_channel=self.pulser_rf_ch) # sample the ensemble (and maybe save it to file, which will # overwrite the old one): self._seq_gen_logic.sample_pulse_block_ensemble(ensemble_name='RF_pulse', write_to_file=True, chunkwise=False) # upload the new sampled file to the device: self._seq_gen_logic.upload_asset(asset_name='RF_pulse') elif meas_type == 'QSD_-_Artificial Drive': pass elif meas_type == 'QSD_-_SWAP_FID': pass elif meas_type == 'QSD_-_Entanglement_FID': pass def _load_measurement_seq(self, meas_seq): """ Load the current measurement sequence in the pulser @param str meas_seq: the measurement sequence which should be loaded into the device. @return: """ # now load the measurement sequence again on the device, which will # load the uploaded pulse instead of the old one: self._seq_gen_logic.load_asset(asset_name=meas_seq) def _create_laser_on(self): """ Create the laser asset. @return: """ #FIXME: Move this creation routine to the tasks! # generate: self._seq_gen_logic.generate_laser_on(name='Laser_On', laser_time_bins=3000, laser_channel=self.pulser_laser_ch) # sample: self._seq_gen_logic.sample_pulse_block_ensemble(ensemble_name='Laser_On', write_to_file=True, chunkwise=False) # upload: self._seq_gen_logic.upload_asset(asset_name='Laser_On') def _load_laser_on(self): """ Load the laser on asset into the pulser. @return: """ #FIXME: Move this creation routine to the tasks! self._seq_gen_logic.load_asset(asset_name='Laser_On') def _pulser_on(self): """ Switch on the pulser output. """ self._set_channel_activation(active=True, apply_to_device=True) self._seq_gen_logic.pulser_on() def _pulser_off(self): """ Switch off the pulser output. """ self._set_channel_activation(active=False, apply_to_device=False) self._seq_gen_logic.pulser_off() def _set_channel_activation(self, active=True, apply_to_device=False): """ Set the channels according to the current activation config to be either active or not. @param bool active: the activation according to the current activation config will be checked and if channel is not active and active=True, then channel will be activated. Otherwise if channel is active and active=False channel will be deactivated. All other channels, which are not in activation config will be deactivated if they are not already deactivated. @param bool apply_to_device: Apply the activation or deactivation of the current activation_config either to the device and the viewboxes, or just to the viewboxes. 
""" pulser_const = self._seq_gen_logic.get_hardware_constraints() curr_config_name = self._seq_gen_logic.current_activation_config_name activation_config = pulser_const['activation_config'][curr_config_name] # here is the current activation pattern of the pulse device: active_ch = self._seq_gen_logic.get_active_channels() ch_to_change = {} # create something like a_ch = {1:True, 2:True} to switch # check whether the correct channels are already active, and if not # correct for that and activate and deactivate the appropriate ones: available_ch = self._get_available_ch() for ch_name in available_ch: # if the channel is in the activation, check whether it is active: if ch_name in activation_config: if apply_to_device: # if channel is not active but activation is needed (active=True), # then add that to ch_to_change to change the state of the channels: if not active_ch[ch_name] and active: ch_to_change[ch_name] = active # if channel is active but deactivation is needed (active=False), # then add that to ch_to_change to change the state of the channels: if active_ch[ch_name] and not active: ch_to_change[ch_name] = active else: # all other channel which are active should be deactivated: if active_ch[ch_name]: ch_to_change[ch_name] = False self._seq_gen_logic.set_active_channels(ch_to_change) def _get_available_ch(self): """ Helper method to get a list of all available channels. @return list: entries are the generic string names of the channels. """ config = self._seq_gen_logic.get_hardware_constraints()['activation_config'] available_ch = [] all_a_ch = [] all_d_ch = [] for conf in config: # extract all analog channels from the config curr_a_ch = [entry for entry in config[conf] if 'a_ch' in entry] curr_d_ch = [entry for entry in config[conf] if 'd_ch' in entry] # append all new analog channels to a temporary array for a_ch in curr_a_ch: if a_ch not in all_a_ch: all_a_ch.append(a_ch) # append all new digital channels to a temporary array for d_ch in curr_d_ch: if d_ch not in all_d_ch: all_d_ch.append(d_ch) all_a_ch.sort() all_d_ch.sort() available_ch.extend(all_a_ch) available_ch.extend(all_d_ch) return available_ch def do_optimize_pos(self): """ Perform an optimize position. """ #FIXME: Move this optimization routine to the tasks! curr_pos = self._confocal_logic.get_position() self._optimizer_logic.start_refocus(curr_pos, caller_tag='nuclear_operations_logic') # check just the state of the optimizer while self._optimizer_logic.getState() != 'idle' and not self._stop_requested: time.sleep(0.5) # use the position to move the scanner self._confocal_logic.set_position('nuclear_operations_logic', self._optimizer_logic.optim_pos_x, self._optimizer_logic.optim_pos_y, self._optimizer_logic.optim_pos_z) def _create_pulsed_odmr(self): """ Create the pulsed ODMR asset. """ #FIXME: Move this creation routine to the tasks! # generate: self._seq_gen_logic.generate_pulsedodmr(name='PulsedODMR', mw_time_ns=(self.electron_rabi_periode*1e9)/2, mw_freq_MHz=self.pulser_mw_freq/1e6, mw_amp_V=self.pulser_mw_amp, mw_channel=self.pulser_mw_ch, laser_time_ns=self.pulser_laser_length*1e9, laser_channel=self.pulser_laser_ch, laser_amp_V=self.pulser_laser_amp, wait_time_ns=self.pulser_idle_time*1e9) # sample: self._seq_gen_logic.sample_pulse_block_ensemble(ensemble_name='PulsedODMR', write_to_file=True, chunkwise=False) # upload: self._seq_gen_logic.upload_asset(asset_name='PulsedODMR') def _load_pulsed_odmr(self): """ Load a pulsed ODMR asset. """ #FIXME: Move this creation routine to the tasks! 
self._seq_gen_logic.load_asset(asset_name='PulsedODMR') def do_optimize_odmr_freq(self): """ Perform an ODMR measurement. """ #FIXME: Move this creation routine to the tasks! # make the odmr around the peak which is used for the mw drive: if self.mw_on_odmr_peak == 0: center_freq = self.odmr_meas_freq0 if self.mw_on_odmr_peak == 1: center_freq = self.odmr_meas_freq1 if self.mw_on_odmr_peak == 2: center_freq = self.odmr_meas_freq2 start_freq = center_freq - self.odmr_meas_freq_range/2 stop_freq = center_freq + self.odmr_meas_freq_range/2 name_tag = 'odmr_meas_for_nuclear_ops' param = self._odmr_logic.perform_odmr_measurement(freq_start=start_freq, freq_step=self.odmr_meas_step, freq_stop=stop_freq, power=self.odmr_meas_power, runtime=self.odmr_meas_runtime, fit_function='N14', save_after_meas=True, name_tag=name_tag) self.odmr_meas_freq0 = param['Freq. 0']['value'] self.odmr_meas_freq1 = param['Freq. 1']['value'] self.odmr_meas_freq2 = param['Freq. 2']['value'] curr_time = (datetime.datetime.now() - self.start_time).total_seconds() self.measured_odmr_list.append([curr_time, self.odmr_meas_freq0, self.odmr_meas_freq1, self.odmr_meas_freq2]) while self._odmr_logic.getState() != 'idle' and not self._stop_requested: time.sleep(0.5) def mw_on(self): """ Start the microwave device. """ self._odmr_logic.MW_on() def mw_off(self): """ Stop the microwave device. """ self._odmr_logic.MW_off() def set_mw_on_odmr_freq(self, freq, power): """ Set the microwave on a the specified freq with the specified power. """ self._odmr_logic.set_frequency(freq) self._odmr_logic.set_power(power) def save_nuclear_operation_measurement(self, name_tag=None, timestamp=None): """ Save the nuclear operation data. @param str name_tag: @param object timestamp: datetime.datetime object, from which everything can be created. 
""" filepath = self._save_logic.get_path_for_module(module_name='NuclearOperations') if timestamp is None: timestamp = datetime.datetime.now() if name_tag is not None and len(name_tag) > 0: filelabel1 = name_tag + '_nuclear_ops_xy_data' filelabel2 = name_tag + '_nuclear_ops_data_y_matrix' filelabel3 = name_tag + '_nuclear_ops_add_data_matrix' filelabel4 = name_tag + '_nuclear_ops_odmr_data' else: filelabel1 = '_nuclear_ops_data' filelabel2 = '_nuclear_ops_data_matrix' filelabel3 = '_nuclear_ops_add_data_matrix' filelabel4 = '_nuclear_ops_odmr_data' param = OrderedDict() param['Electron Rabi Period (ns)'] = self.electron_rabi_periode*1e9 param['Pulser Microwave Frequency (MHz)'] = self.pulser_mw_freq/1e6 param['Pulser MW amp (V)'] = self.pulser_mw_amp param['Pulser MW channel'] = self.pulser_mw_ch param['Nuclear Rabi period Trans 0 (micro-s)'] = self.nuclear_rabi_period0*1e6 param['Nuclear Trans freq 0 (MHz)'] = self.pulser_rf_freq0/1e6 param['Pulser RF amp 0 (V)'] = self.pulser_rf_amp0 param['Nuclear Rabi period Trans 1 (micro-s)'] = self.nuclear_rabi_period1*1e6 param['Nuclear Trans freq 1 (MHz)'] = self.pulser_rf_freq1/1e6 param['Pulser RF amp 1 (V)'] = self.pulser_rf_amp1 param['Pulser Rf channel'] = self.pulser_rf_ch param['Pulser Laser length (ns)'] = self.pulser_laser_length*1e9 param['Pulser Laser amp (V)'] = self.pulser_laser_amp param['Pulser Laser channel'] = self.pulser_laser_ch param['Number of single shot readouts per pulse'] = self.num_singleshot_readout param['Pulser idle Time (ns)'] = self.pulser_idle_time*1e9 param['Pulser Detect channel'] = self.pulser_detect_ch data1 = OrderedDict() data2 = OrderedDict() data3 = OrderedDict() data4 = OrderedDict() # Measurement Parameter: param[''] = self.current_meas_asset_name if self.current_meas_asset_name in ['Nuclear_Frequency_Scan']: param['x axis start (MHz)'] = self.x_axis_start/1e6 param['x axis step (MHz)'] = self.x_axis_step/1e6 param['Current '] = self.current_meas_point/1e6 data1['RF pulse frequency (MHz)'] = self.x_axis_list data1['Flip Probability'] = self.y_axis_list data2['RF pulse frequency matrix (MHz)'] = self.y_axis_matrix elif self.current_meas_asset_name in ['Nuclear_Rabi','QSD_-_Artificial_Drive', 'QSD_-_SWAP_FID','QSD_-_Entanglement_FID']: param['x axis start (micro-s)'] = self.x_axis_start*1e6 param['x axis step (micro-s)'] = self.x_axis_step*1e6 param['Current '] = self.current_meas_point*1e6 data1['RF pulse length (micro-s)'] = self.x_axis_list data1['Flip Probability'] = self.y_axis_list data2['RF pulse length matrix (micro-s)'] = self.y_axis_matrix else: param['x axis start'] = self.x_axis_start param['x axis step'] = self.x_axis_step param['Current '] = self.current_meas_point data1['x axis'] = self.x_axis_list data1['y axis'] = self.y_axis_list data2['y axis matrix)'] = self.y_axis_matrix data3['Additional Data Matrix'] = self.parameter_matrix data4['Measured ODMR Data Matrix'] = np.array(self.measured_odmr_list) param['Number of expected measurement points per run'] = self.x_axis_num_points param['Number of expected measurement runs'] = self.num_of_meas_runs param['Number of current measurement runs'] = self.num_of_current_meas_runs param['Current measurement index'] = self.current_meas_index param['Optimize Period ODMR (s)'] = self.optimize_period_odmr param['Optimize Period Confocal (s)'] = self.optimize_period_confocal param['current ODMR trans freq0 (MHz)'] = self.odmr_meas_freq0/1e6 param['current ODMR trans freq1 (MHz)'] = self.odmr_meas_freq1/1e6 param['current ODMR trans freq2 (MHz)'] = 
self.odmr_meas_freq2/1e6 param['Runtime of ODMR optimization (s)'] = self.odmr_meas_runtime param['Frequency Range ODMR optimization (MHz)'] = self.odmr_meas_freq_range/1e6 param['Frequency Step ODMR optimization (MHz)'] = self.odmr_meas_step/1e6 param['Power of ODMR optimization (dBm)'] = self.odmr_meas_power param['Selected ODMR trans freq (MHz)'] = self.mw_cw_freq/1e6 param['Selected ODMR trans power (dBm)'] = self.mw_cw_power param['Selected ODMR trans Peak'] = self.mw_on_odmr_peak param['Number of samples in the gated counter'] = self.gc_number_of_samples param['Number of samples per readout'] = self.gc_samples_per_readout param['Elapsed Time (s)'] = self.elapsed_time param['Start of measurement'] = self.start_time.strftime('%Y-%m-%d %H:%M:%S') self._save_logic.save_data(data1, filepath=filepath, parameters=param, filelabel=filelabel1, timestamp=timestamp) self._save_logic.save_data(data2, filepath=filepath, filelabel=filelabel2, timestamp=timestamp) self._save_logic.save_data(data4, filepath=filepath, filelabel=filelabel4, timestamp=timestamp) # self._save_logic.save_data(data3, # filepath=filepath, # filelabel=filelabel3, # timestamp=timestamp) self.log.info('Nuclear Operation data saved to:\n{0}'.format(filepath))
childresslab/MicrocavityExp1
logic/nuclear_operations_logic.py
Python
gpl-3.0
42,465
[ "Gaussian" ]
004b3dd74609f40f087258111529ea2dde2407cd68f93777adb0b6bd7745ebed
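Two small details of the nuclear-operations logic above are easy to miss: initialize_x_axis pads the stop value with half a step so np.arange keeps the final point, and _set_meas_point grows y_axis_matrix by stacking a fresh row per measurement run while y_axis_list keeps the column-wise mean. The sketch below restates both in isolation; the function names are mine.

import numpy as np

def build_x_axis(start, step, num_points):
    # Pad the stop by half a step so the last point survives np.arange's
    # half-open interval, mirroring initialize_x_axis() above.
    stop = start + step * num_points
    return np.arange(start, stop + step / 2, step)

def record_point(y_matrix, run_index, meas_index, value):
    # Append a new row when a new run starts, then store the value and
    # return the running mean over all runs for this x-axis point.
    while run_index >= y_matrix.shape[0]:
        y_matrix = np.vstack((y_matrix, np.zeros(y_matrix.shape[1])))
    y_matrix[run_index, meas_index] = value
    return y_matrix, y_matrix[:, meas_index].mean()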
from collections import OrderedDict import decimal, datetime import pickle import logging import json import requests from django.core.cache import cache logger = logging.getLogger("foo") ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) dec = decimal.Decimal # create formatter formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # add formatter to ch ch.setFormatter(formatter) # add ch to logger logger.addHandler(ch) class Dump(object): def append_to_data(self, index, value): if self.first_run: try: float(value) return True except: self.valid_data = False logger.error("Error converting %s to float, skipping this row. (Index %s, %s)" % (value, index, self.b)) return False try: self.data[index].append(float(value)) except: raise Exception("Error converting %s to float. (Index %s)" % (value, index)) def handle(self, *args, **filters): from manual.models import GutterBumper self.data = [] for d in range(0, 34): self.data.append([]) for b in GutterBumper.objects.filter(**filters).all().order_by("date"): self.first_run = True self.valid_data = True self.add_valid_data = False self.b = b while self.first_run or self.add_valid_data: self.append_to_data(0, b.date.month) # month self.append_to_data(1, b.woke_up_at.hour) # woke up hour fell_asleep_hr = b.fell_asleep_at.hour if fell_asleep_hr < 13: fell_asleep_hr += 24 self.append_to_data(2, fell_asleep_hr ) # = models.TimeField(default=datetime.time(0, 00)) self.append_to_data(3, b.sleep_hrs) # = models.FloatField(default=0, blank=True, null=True, verbose_name="Sleep", help_text="Sleep this morning") self.append_to_data(4, b.work_hrs) # = models.FloatField(default=0, blank=True, null=True, verbose_name="Work") self.append_to_data(5, b.alone_hrs) # = models.FloatField(default=0, blank=True, null=True, verbose_name="Alone") self.append_to_data(6, b.friend_hrs) # = models.FloatField(default=0, blank=True, null=True, verbose_name="Friend") self.append_to_data(7, b.public_hrs) # = models.FloatField(default=0, blank=True, null=True, help_text="Not specifically hanging with people, but in a larger crowd", verbose_name="Public") self.append_to_data(8, b.relationship_hrs) # = models.FloatField(default=0, blank=True, null=True, verbose_name="Relationship") self.append_to_data(9, 10 if b.off else 0) # = models.BooleanField(default=False) self.append_to_data(10, b.sex or 0) # = models.IntegerField(default=0) self.append_to_data(11, 10 if b.interacted_with_art else 0) # = models.BooleanField(default=False) self.append_to_data(12, 10 if b.worked_out else 0) # = models.BooleanField(default=False) self.append_to_data(13, 10 if b.meditated else 0) # = models.BooleanField(default=False, verbose_name="meditated") self.append_to_data(14, 10 if b.left_the_house else 0) # = models.BooleanField(default=False) self.append_to_data(15, 10 if b.nature_time else 0) # = models.BooleanField(default=False) self.append_to_data(16, 10 if b.inbox_zero else 0) # = models.BooleanField(default=False) self.append_to_data(17, 10 if b.travelling_or_out_of_routine else 0) # = models.BooleanField(default=False, verbose_name="Travelling/Nonroutine") self.append_to_data(18, b.number_of_sleep_beers or 0) # = models.IntegerField(blank=True, null=True, verbose_name="# of sleep beers") self.append_to_data(19, b.number_of_fun_beers or 0) # = models.IntegerField(blank=True, null=True, verbose_name="# of fun beers") self.append_to_data(20, (b.number_of_sleep_beers or 0) + (b.number_of_fun_beers or 0)) # = models.IntegerField(blank=True, null=True, 
verbose_name="# of fun beers") self.append_to_data(21, b.presence) # = models.IntegerField(blank=True, null=True, help_text="1-10") self.append_to_data(22, b.happiness) # = models.IntegerField(blank=True, null=True, help_text="1-10") self.append_to_data(23, b.creativity) # = models.IntegerField(blank=True, null=True, help_text="1-10") self.append_to_data(24, b.morning_mood) # = models.IntegerField(blank=True, null=True, help_text="1-10") self.append_to_data(25, b.unbusy or 9) # = models.IntegerField(blank=True, null=True, help_text="1-10") notes_len = 0 try: notes_len = len(b.notes) except: pass self.append_to_data(26, notes_len) # = models.TextField(blank=True, null=True, default="86400") self.append_to_data(27, 10 if (b.date.month < 3 or b.date.month == 12) else 0) # winter self.append_to_data(28, 10 if (b.date.month >= 3 and b.date.month < 6) else 0) # spring self.append_to_data(29, 10 if (b.date.month >= 6 and b.date.month < 9) else 0) # summer self.append_to_data(30, 10 if (b.date.month >= 9 and b.date.month < 12) else 0) # fall self.append_to_data(31, 10 if (b.emotions.filter(name="Dentist Visit").count() > 0) else 0) # dentist self.append_to_data(32, b.moon_phase) self.append_to_data(33, 10 if b.in_a_relationship else 0) if self.first_run: self.first_run = False if self.valid_data: self.add_valid_data = True else: self.add_valid_data = False # Sanity check. total_len = None for index in range(0, len(self.data)): d = self.data[index] if not total_len: total_len = len(d) if len(d) != total_len: logger.critical("Unequal datasets %s!" % index) return pickle.dumps(self.data) def dump_data_pickle(**filters): c = Dump() today = datetime.date.today() - datetime.timedelta(days=1640) return c.handle(date__gt=today) # Via http://inamidst.com/code/moonphase.py def moon_position(now=None): if now is None: now = datetime.datetime.now() diff = now - datetime.datetime(2001, 1, 1) days = dec(diff.days) + (dec(diff.seconds) / dec(86400)) lunations = dec("0.20439731") + (days * dec("0.03386319269")) return 28 * (dec(0.5) - abs(dec(0.5) - (lunations % dec(1)))) CORRELATION_CHOICES = OrderedDict() CORRELATION_CHOICES["presence"] = "Presence" CORRELATION_CHOICES["happiness"] = "Happiness" CORRELATION_CHOICES["creativity"] = "Creativity" CORRELATION_CHOICES["morning_mood"] = "Morning mood" CORRELATION_CHOICES["unbusy"] = "Unbusy-ness" CORRELATION_CHOICES["sleep_hrs"] = "Hours of sleep" CORRELATION_CHOICES["work_hrs"] = "Hours spent working" CORRELATION_CHOICES["alone_hrs"] = "Hours spent alone" CORRELATION_CHOICES["neap_hrs"] = "Hours spent neither alone or in quality time" CORRELATION_CHOICES["friend_hrs"] = "Hours with friends" CORRELATION_CHOICES["public_hrs"] = "Hours in public" CORRELATION_CHOICES["relationship_hrs"] = "Hours with my significant other" CORRELATION_CHOICES["woke_up_at"] = "Woke up later" CORRELATION_CHOICES["fell_asleep_at"] = "Fell asleep later" CORRELATION_CHOICES["orgasm"] = "Off/Sex" CORRELATION_CHOICES["sex_count"] = "Sex Count" CORRELATION_CHOICES["interacted_with_art"] = "Interacted with art" CORRELATION_CHOICES["dentist"] = "Went to the dentist" CORRELATION_CHOICES["worked_out"] = "Worked out" CORRELATION_CHOICES["meditated"] = "Meditated" CORRELATION_CHOICES["left_the_house"] = "Left the house" CORRELATION_CHOICES["nature_time"] = "Nature time" CORRELATION_CHOICES["ate_green"] = "Ate Something Green" CORRELATION_CHOICES["in_a_relationship"] = "In a relationship" # CORRELATION_CHOICES["inbox_zero"] = "Inbox zero" CORRELATION_CHOICES["travelling_or_out_of_routine"] = 
"Travelling" CORRELATION_CHOICES["number_of_sleep_beers"] = "Beers to fall asleep" CORRELATION_CHOICES["number_of_fun_beers"] = "Beers for fun" # CORRELATION_CHOICES["number_of_total_beers"] = "Total beers" CORRELATION_CHOICES["notes length"] = "# of words in daily notes" CORRELATION_CHOICES["spring"] = "Spring" CORRELATION_CHOICES["summer"] = "Summer" CORRELATION_CHOICES["fall"] = "Fall" CORRELATION_CHOICES["winter"] = "Winter" CORRELATION_CHOICES["moon_phase"] = "Moon Fullness" # CORRELATION_CHOICES["month"] = "Month of the year" def save_correlations(): cols = [ "month", "woke_up_at", "fell_asleep_at", "sleep_hrs", "work_hrs", "alone_hrs", "neap_hrs", "friend_hrs", "public_hrs", "relationship_hrs", "orgasm", "sex_count", "interacted_with_art", "worked_out", "meditated", "left_the_house", "nature_time", "inbox_zero", "travelling_or_out_of_routine", "number_of_sleep_beers", "number_of_fun_beers", "number_of_total_beers", "presence", "happiness", "creativity", "morning_mood", "unbusy", "spoons", "notes length", "winter", "spring", "summer", "fall", "dentist", "moon_phase", "in_a_relationship", ] data = pickle.loads(dump_data_pickle()) headers = {'Content-type': 'application/json', } stripped_data = json.dumps({ "data": data }).replace(", ", ",") resp = requests.post("http://correlationbot.com", headers=headers, data=stripped_data) if resp.status_code != 200: print stripped_data print resp print resp.__dict__ print resp.content correlations = resp.json()["correlations"] # correlations = {} for c in correlations: c["col1"] = cols[c["column_1"]-1] c["col2"] = cols[c["column_2"]-1] try: c["col1_friendly"] = CORRELATION_CHOICES[cols[c["column_1"]-1]] except: c["col1_friendly"] = "Ignored" try: c["col2_friendly"] = CORRELATION_CHOICES[cols[c["column_2"]-1]] except: c["col2_friendly"] = "Ignored" if str(c["pearson"]) == "nan": c["pearson"] = 0 c["correlation"] = 0 cache.set("current_correlations", correlations) cache.set("current_correlation_choices", CORRELATION_CHOICES)
skoczen/skoczen
project/apps/manual/utils.py
Python
bsd-2-clause
10,880
[ "VisIt" ]
8eab8497637d716a0c2d910a165e35ce5febf4092933397e46ccfc9c53aebaff
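The moon_position helper in the entry above measures moon fullness by converting days since 2001-01-01 into lunations and folding the fractional cycle around its midpoint. A plain-float restatement, assuming only what the function itself shows:

import datetime

def moon_fullness(now=None):
    # Ranges from 0 at one end of the lunar cycle up to 14 mid-cycle and back,
    # using the same constants as moon_position() above, without the decimal module.
    now = now or datetime.datetime.now()
    diff = now - datetime.datetime(2001, 1, 1)
    days = diff.days + diff.seconds / 86400.0
    lunations = 0.20439731 + days * 0.03386319269   # cycles since the reference epoch
    frac = lunations % 1.0
    return 28 * (0.5 - abs(0.5 - frac))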
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.

# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

from Core.exceptions_ import PNickParseError
from Core.db import session
from Core.maps import Galaxy, Planet, Alliance, User
from Core.loadable import loadable, route, require_planet

class lookup(loadable):
    usage = " [x:y[:z]|alliance|user]"

    @route(loadable.coord)
    def planet_galaxy(self, message, user, params):
        # Planet
        if params.group(5) is not None:
            planet = Planet.load(*params.group(1,3,5))
            if planet is None:
                message.reply("No planet with coords %s:%s:%s found" % params.group(1,3,5))
                return
            message.reply(str(planet))
            return
        # Galaxy
        else:
            galaxy = Galaxy.load(*params.group(1,3))
            if galaxy is None:
                message.reply("No galaxy with coords %s:%s" % params.group(1,3))
                return
            message.reply(str(galaxy))
            return

    @route(r"")
    @require_planet
    def me(self, message, user, params):
        message.reply(str(user.planet))

    @route(r"(\S+)")
    def user_alliance(self, message, user, params):
        alliance = Alliance.load(params.group(1)) if params.group(1) is not None else None

        # Alliance
        if alliance is not None:
            message.reply(str(alliance))
            return

        # User
        if not self.is_user(user):
            raise PNickParseError
        elif not user.is_member():
            message.reply("No alliance matching '%s' found" % (params.group(1),))
            return
        else:
            lookup = User.load(params.group(1), exact=False, access="member") or User.load(params.group(1))
            if lookup is None:
                message.reply("No alliance or user matching '%s' found" % (params.group(1),))
                return
            elif lookup.planet is None:
                message.reply("User %s has not entered their planet details" % (lookup.name,))
                return
            else:
                message.reply(str(lookup.planet))
                return
ellonweb/merlin
Hooks/lookup.py
Python
gpl-2.0
3,072
[ "Galaxy" ]
1298c7b47d8b07542b9b37493917970ac5afede14214af9f8d3163f6b84e687c
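The planet_galaxy handler above relies on loadable.coord putting x, y and z into regex groups 1, 3 and 5, so a missing group 5 means only a galaxy was given. That pattern is defined elsewhere in Merlin and is not shown here; the toy regex below is only an illustration of why those group numbers carry the coordinates.

import re

# Hypothetical stand-in for loadable.coord -- the real pattern lives in
# Core.loadable and may differ.
COORD = re.compile(r"(\d+)(:(\d+)(:(\d+))?)?")

print(COORD.match("4:7:12").group(1, 3, 5))   # ('4', '7', '12') -> planet lookup
print(COORD.match("4:7").group(1, 3, 5))      # ('4', '7', None) -> galaxy lookup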
from __future__ import absolute_import, division, print_function from itertools import chain from .utils_test import add, inc # noqa: F401 def ishashable(x): """ Is x hashable? Examples -------- >>> ishashable(1) True >>> ishashable([1]) False """ try: hash(x) return True except TypeError: return False def istask(x): """ Is x a runnable task? A task is a tuple with a callable first argument Examples -------- >>> inc = lambda x: x + 1 >>> istask((inc, 1)) True >>> istask(1) False """ return type(x) is tuple and x and callable(x[0]) def has_tasks(dsk, x): """Whether ``x`` has anything to compute. Returns True if: - ``x`` is a task - ``x`` is a key in ``dsk`` - ``x`` is a list that contains any tasks or keys """ if istask(x): return True try: if x in dsk: return True except: pass if isinstance(x, list): for i in x: if has_tasks(dsk, i): return True return False def preorder_traversal(task): """A generator to preorder-traverse a task.""" for item in task: if istask(item): for i in preorder_traversal(item): yield i elif isinstance(item, list): yield list for i in preorder_traversal(item): yield i else: yield item def _get_nonrecursive(d, x, maxdepth=1000): # Non-recursive. DAG property is checked upon reaching maxdepth. _list = lambda *args: list(args) # We construct a nested hierarchy of tuples to mimic the execution stack # of frames that Python would maintain for a recursive implementation. # A frame is associated with a single task from a Dask. # A frame tuple has three elements: # 1) The function for the task. # 2) The arguments for the task (typically keys in the Dask). # Arguments are stored in reverse order, and elements are popped # as they are evaluated. # 3) The calculated results of the arguments from (2). stack = [(lambda x: x, [x], [])] while True: func, args, results = stack[-1] if not args: val = func(*results) if len(stack) == 1: return val stack.pop() stack[-1][2].append(val) continue elif maxdepth and len(stack) > maxdepth: cycle = getcycle(d, x) if cycle: cycle = '->'.join(cycle) raise RuntimeError('Cycle detected in Dask: %s' % cycle) maxdepth = None key = args.pop() if isinstance(key, list): stack.append((_list, list(key[::-1]), [])) continue elif ishashable(key) and key in d: args.append(d[key]) continue elif istask(key): stack.append((key[0], list(key[:0:-1]), [])) else: results.append(key) def _get_recursive(d, x): # recursive, no cycle detection if isinstance(x, list): return [_get_recursive(d, k) for k in x] elif ishashable(x) and x in d: return _get_recursive(d, d[x]) elif istask(x): func, args = x[0], x[1:] args2 = [_get_recursive(d, k) for k in args] return func(*args2) else: return x def get(d, x, recursive=False): """ Get value from Dask Examples -------- >>> inc = lambda x: x + 1 >>> d = {'x': 1, 'y': (inc, 'x')} >>> get(d, 'x') 1 >>> get(d, 'y') 2 """ _get = _get_recursive if recursive else _get_nonrecursive if isinstance(x, list): return tuple(get(d, k) for k in x) elif x in d: return _get(d, x) raise KeyError("{0} is not a key in the graph".format(x)) def _deps(dsk, arg): """ Get dependencies from keys or tasks Helper function for get_dependencies. 
>>> dsk = {'x': 1, 'y': 2} >>> _deps(dsk, 'x') ['x'] >>> _deps(dsk, (add, 'x', 1)) ['x'] >>> _deps(dsk, ['x', 'y']) ['x', 'y'] >>> _deps(dsk, {'a': 'x'}) ['x'] >>> _deps(dsk, (add, 'x', (inc, 'y'))) # doctest: +SKIP ['x', 'y'] """ if istask(arg): result = [] for a in arg[1:]: result.extend(_deps(dsk, a)) return result if type(arg) is list: return sum([_deps(dsk, a) for a in arg], []) if type(arg) is dict: return sum([_deps(dsk, v) for v in arg.values()], []) try: if arg not in dsk: return [] except TypeError: # not hashable return [] return [arg] def get_dependencies(dsk, task, as_list=False): """ Get the immediate tasks on which this task depends >>> dsk = {'x': 1, ... 'y': (inc, 'x'), ... 'z': (add, 'x', 'y'), ... 'w': (inc, 'z'), ... 'a': (add, (inc, 'x'), 1)} >>> get_dependencies(dsk, 'x') set([]) >>> get_dependencies(dsk, 'y') set(['x']) >>> get_dependencies(dsk, 'z') # doctest: +SKIP set(['x', 'y']) >>> get_dependencies(dsk, 'w') # Only direct dependencies set(['z']) >>> get_dependencies(dsk, 'a') # Ignore non-keys set(['x']) """ args = [dsk[task]] result = [] while args: arg = args.pop() if istask(arg): args.extend(arg[1:]) elif type(arg) is list: args.extend(arg) else: result.append(arg) if not result: return [] if as_list else set() rv = [] for x in result: rv.extend(_deps(dsk, x)) return rv if as_list else set(rv) def get_deps(dsk): """ Get dependencies and dependents from dask dask graph >>> dsk = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')} >>> dependencies, dependents = get_deps(dsk) >>> dependencies {'a': set([]), 'c': set(['b']), 'b': set(['a'])} >>> dependents {'a': set(['b']), 'c': set([]), 'b': set(['c'])} """ dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk) dependents = reverse_dict(dependencies) return dependencies, dependents def flatten(seq): """ >>> list(flatten([1])) [1] >>> list(flatten([[1, 2], [1, 2]])) [1, 2, 1, 2] >>> list(flatten([[[1], [2]], [[1], [2]]])) [1, 2, 1, 2] >>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples [(1, 2), (1, 2)] >>> list(flatten((1, 2, [3, 4]))) # support heterogeneous [1, 2, 3, 4] """ if isinstance(seq, str): yield seq else: for item in seq: if isinstance(item, list): for item2 in flatten(item): yield item2 else: yield item def reverse_dict(d): """ >>> a, b, c = 'abc' >>> d = {a: [b, c], b: [c]} >>> reverse_dict(d) # doctest: +SKIP {'a': set([]), 'b': set(['a']}, 'c': set(['a', 'b'])} """ terms = list(d.keys()) + list(chain.from_iterable(d.values())) result = dict((t, set()) for t in terms) for k, vals in d.items(): for val in vals: result[val].add(k) return result def subs(task, key, val): """ Perform a substitution on a task Examples -------- >>> subs((inc, 'x'), 'x', 1) # doctest: +SKIP (inc, 1) """ if not istask(task): try: if type(task) is type(key) and task == key: return val except Exception: pass if isinstance(task, list): return [subs(x, key, val) for x in task] return task newargs = [] for arg in task[1:]: if istask(arg): arg = subs(arg, key, val) elif isinstance(arg, list): arg = [subs(x, key, val) for x in arg] elif type(arg) is type(key) and arg == key: arg = val newargs.append(arg) return task[:1] + tuple(newargs) def _toposort(dsk, keys=None, returncycle=False, dependencies=None): # Stack-based depth-first search traversal. This is based on Tarjan's # method for topological sorting (see wikipedia for pseudocode) if keys is None: keys = dsk elif not isinstance(keys, list): keys = [keys] if not returncycle: ordered = [] # Nodes whose descendents have been completely explored. 
# These nodes are guaranteed to not be part of a cycle. completed = set() # All nodes that have been visited in the current traversal. Because # we are doing depth-first search, going "deeper" should never result # in visiting a node that has already been seen. The `seen` and # `completed` sets are mutually exclusive; it is okay to visit a node # that has already been added to `completed`. seen = set() if dependencies is None: dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk) for key in keys: if key in completed: continue nodes = [key] while nodes: # Keep current node on the stack until all descendants are visited cur = nodes[-1] if cur in completed: # Already fully traversed descendants of cur nodes.pop() continue seen.add(cur) # Add direct descendants of cur to nodes stack next_nodes = [] for nxt in dependencies[cur]: if nxt not in completed: if nxt in seen: # Cycle detected! cycle = [nxt] while nodes[-1] != nxt: cycle.append(nodes.pop()) cycle.append(nodes.pop()) cycle.reverse() if returncycle: return cycle else: cycle = '->'.join(cycle) raise RuntimeError('Cycle detected in Dask: %s' % cycle) next_nodes.append(nxt) if next_nodes: nodes.extend(next_nodes) else: # cur has no more descendants to explore, so we're done with it if not returncycle: ordered.append(cur) completed.add(cur) seen.remove(cur) nodes.pop() if returncycle: return [] return ordered def toposort(dsk, dependencies=None): """ Return a list of keys of dask sorted in topological order.""" return _toposort(dsk, dependencies=dependencies) def getcycle(d, keys): """ Return a list of nodes that form a cycle if Dask is not a DAG. Returns an empty list if no cycle is found. ``keys`` may be a single key or list of keys. Examples -------- >>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')} >>> getcycle(d, 'x') ['x', 'z', 'y', 'x'] See Also -------- isdag """ return _toposort(d, keys=keys, returncycle=True) def isdag(d, keys): """ Does Dask form a directed acyclic graph when calculating keys? ``keys`` may be a single key or list of keys. Examples -------- >>> inc = lambda x: x + 1 >>> isdag({'x': 0, 'y': (inc, 'x')}, 'y') True >>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y') False See Also -------- getcycle """ return not getcycle(d, keys) def list2(L): return list(L) def quote(x): """ Ensure that this value remains this value in a dask graph Some values in dask graph take on special meaning. Lists become iterators, tasks get executed. Sometimes we want to ensure that our data is not interpreted but remains literal. >>> quote([1, 2, 3]) [1, 2, 3] >>> quote((add, 1, 2)) # doctest: +SKIP (tuple, [add, 1, 2]) """ if istask(x): return (tuple, list(map(quote, x))) return x
cowlicks/dask
dask/core.py
Python
bsd-3-clause
12,037
[ "VisIt" ]
e33d1ffec7a0db6f7926d7dcf760dc8f47b9946b8e31d23a07e3542bc04f34a6
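A minimal usage sketch for the graph helpers defined in the dask/core.py record above, assuming that snapshot is importable as dask.core (newer dask releases may have moved or renamed some of these names). It builds a three-key task graph, queries direct dependencies, topologically sorts it, and probes a deliberately cyclic graph; set ordering and the exact cycle path reported can vary.

from operator import add

from dask.core import get_dependencies, get_deps, getcycle, isdag, quote, toposort

inc = lambda x: x + 1

# A tiny task graph: tuples whose first element is callable are tasks.
dsk = {'a': 1,
       'b': (inc, 'a'),
       'c': (add, 'a', 'b')}

print(get_dependencies(dsk, 'c'))   # {'a', 'b'} -- direct dependencies only
dependencies, dependents = get_deps(dsk)
print(dependents['a'])              # {'b', 'c'} -- keys that consume 'a'
print(toposort(dsk))                # a valid evaluation order, here ['a', 'b', 'c']

cyclic = {'x': (inc, 'y'), 'y': (inc, 'x')}
print(isdag(cyclic, 'x'))           # False
print(getcycle(cyclic, 'x'))        # the offending path, e.g. ['x', 'y', 'x']

# quote() wraps a task-shaped tuple so it stays literal data: (tuple, [add, 1, 2])
print(quote((add, 1, 2)))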
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2007 Donald N. Allingham # Copyright (C) 2009 Brian G. Matherly # Copyright (C) 2009 Gary Burton # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Provide the management of databases. This includes opening, renaming, creating, and deleting of databases. """ #------------------------------------------------------------------------- # # Standard python modules # #------------------------------------------------------------------------- import os import time import copy import subprocess from urllib.parse import urlparse import logging import re #------------------------------------------------------------------------- # # GTK/Gnome modules # #------------------------------------------------------------------------- from gi.repository import Gdk from gi.repository import Gtk from gi.repository import Pango #------------------------------------------------------------------------- # # gramps modules # #------------------------------------------------------------------------- from .display import display_help from gramps.gen.const import URL_WIKISTRING, URL_MANUAL_PAGE from .user import User from .dialog import ErrorDialog, QuestionDialog, QuestionDialog2, ICON from .pluginmanager import GuiPluginManager from gramps.cli.clidbman import CLIDbManager, NAME_FILE, time_val, UNAVAILABLE from .managedwindow import ManagedWindow from .ddtargets import DdTargets from gramps.gen.recentfiles import rename_filename, remove_filename from .glade import Glade from gramps.gen.db.exceptions import DbException from gramps.gen.db.utils import make_database, open_database from gramps.gen.config import config from .listmodel import ListModel from gramps.gen.constfunc import win from gramps.gen.plug import BasePluginManager from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------- # # set up logging # #------------------------------------------------------------------------- LOG = logging.getLogger(".DbManager") #------------------------------------------------------------------------- # # constants # #------------------------------------------------------------------------- if win(): _RCS_FOUND = os.system("rcs -V >nul 2>nul") == 0 if _RCS_FOUND and "TZ" not in os.environ: # RCS requires the "TZ" variable be set. 
os.environ["TZ"] = str(time.timezone) else: _RCS_FOUND = os.system("rcs -V >/dev/null 2>/dev/null") == 0 _RETURN = Gdk.keyval_from_name("Return") _KP_ENTER = Gdk.keyval_from_name("KP_Enter") WIKI_HELP_PAGE = _('%s_-_Manage_Family_Trees') % URL_MANUAL_PAGE WIKI_HELP_SEC = _('Family_Trees_manager_window') ARCHIVE = "rev.gramps" ARCHIVE_V = "rev.gramps,v" NAME_COL = 0 PATH_COL = 1 FILE_COL = 2 DATE_COL = 3 DSORT_COL = 4 OPEN_COL = 5 ICON_COL = 6 BACKEND_COL = 7 RCS_BUTTON = {True : _('_Extract'), False : _('_Archive')} class Information(ManagedWindow): def __init__(self, uistate, data, track): super().__init__(uistate, track, self, modal=True) self.window = Gtk.Dialog() self.set_window(self.window, None, _("Database Information")) self.setup_configs('interface.information', 600, 400) self.ok = self.window.add_button(_('_OK'), Gtk.ResponseType.OK) self.ok.connect('clicked', self.on_ok_clicked) s = Gtk.ScrolledWindow() titles = [ (_('Setting'), 0, 150), (_('Value'), 1, 400) ] treeview = Gtk.TreeView() model = ListModel(treeview, titles) for key, value in sorted(data.items()): model.add((key, str(value),), key) s.add(treeview) self.window.vbox.pack_start(s, True, True, 0) self.show() def on_ok_clicked(self, obj): self.window.close() def build_menu_names(self, obj): return (_('Database Information'), None) class DbManager(CLIDbManager, ManagedWindow): """ Database Manager. Opens a database manager window that allows users to create, rename, delete and open databases. """ ICON_MAP = { CLIDbManager.ICON_NONE : None, CLIDbManager.ICON_RECOVERY : 'dialog-error', CLIDbManager.ICON_LOCK : 'gramps-lock', CLIDbManager.ICON_OPEN : 'document-open', } BUSY_CURSOR = Gdk.Cursor.new_for_display(Gdk.Display.get_default(), Gdk.CursorType.WATCH) def __init__(self, uistate, dbstate, viewmanager, parent=None): """ Create the top level window from the glade description, and extracts the GTK widgets that are needed. """ window_id = self ManagedWindow.__init__(self, uistate, [], window_id, modal=True) CLIDbManager.__init__(self, dbstate) self.glade = Glade(toplevel='dbmanager') self.top = self.glade.toplevel self.set_window(self.top, None, None) self.setup_configs('interface.dbmanager', 780, 350) self.viewmanager = viewmanager for attr in ['connect_btn', 'cancel_btn', 'new_btn', 'remove_btn', 'info_btn', 'dblist', 'rename_btn', 'convert_btn', 'repair_btn', 'rcs_btn', 'msg', 'close_btn']: setattr(self, attr, self.glade.get_object(attr)) self.model = None self.column = None self.lock_file = None self.data_to_delete = None self.selection = self.dblist.get_selection() # For already loaded database: self._current_node = None self.__connect_signals() self.__build_interface() self._populate_model() self.before_change = "" self.after_change = "" self._select_default() self.user = User(error=ErrorDialog, parent=parent, callback=self.uistate.pulse_progressbar, uistate=self.uistate) def build_menu_names(self, obj): ''' This window can have children, but they are modal so no submenu is visible''' submenu_label = " " menu_label = _('Family Trees') return (menu_label, submenu_label) def _select_default(self): """ Select the current, or latest, tree. """ # If already loaded database, center on it: if self._current_node: store, node = self.selection.get_selected() tree_path = store.get_path(self._current_node) self.selection.select_path(tree_path) self.dblist.scroll_to_cell(tree_path, None, 1, 0.5, 0) def __connect_signals(self): """ Connects the signals to the buttons on the interface. 
""" ddtarget = DdTargets.URI_LIST self.top.drag_dest_set(Gtk.DestDefaults.ALL, [DdTargets.URI_LIST.target()], Gdk.DragAction.COPY) self.remove_btn.connect('clicked', self.__remove_db) self.new_btn.connect('clicked', self.__new_db) self.rename_btn.connect('clicked', self.__rename_db) self.convert_btn.connect('clicked', self.__convert_db_ask) self.info_btn.connect('clicked', self.__info_db) self.close_btn.connect('clicked', self.__close_db) self.repair_btn.connect('clicked', self.__repair_db) self.selection.connect('changed', self.__selection_changed) self.dblist.connect('button-press-event', self.__button_press) self.dblist.connect('key-press-event', self.__key_press) self.top.connect('drag_data_received', self.__drag_data_received) self.top.connect('drag_motion', drag_motion) self.top.connect('drag_drop', drop_cb) self.define_help_button( self.glade.get_object('help_btn'), WIKI_HELP_PAGE, WIKI_HELP_SEC) if _RCS_FOUND: self.rcs_btn.connect('clicked', self.__rcs) def define_help_button(self, button, webpage='', section=''): button.connect('clicked', lambda x: display_help(webpage, section)) def __button_press(self, obj, event): """ Checks for a double click event. In the tree view, we want to treat a double click as if it was OK button press. However, we have to make sure that an item was selected first. """ if (event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS and event.button == 1): if self.connect_btn.get_property('sensitive'): self.top.response(Gtk.ResponseType.OK) return True return False def __key_press(self, obj, event): """ Grab ENTER so it does not start editing the cell, but behaves like double click instead """ if event.keyval in (_RETURN, _KP_ENTER): if self.connect_btn.get_property('sensitive'): self.top.response(Gtk.ResponseType.OK) return True return False def __selection_changed(self, selection): """ Called when the selection is changed in the TreeView. """ self.__update_buttons(selection) def __update_buttons(self, selection): """ What we are trying to detect is the selection or unselection of a row. When a row is unselected, the Open, Rename, and Remove buttons are set insensitive. If a row is selected, the rename and remove buttons are disabled, and the Open button is disabled if the row represents a open database. 
""" # Get the current selection store, node = selection.get_selected() if not _RCS_FOUND: # it's not in Windows self.rcs_btn.set_visible(False) # if nothing is selected if not node: self.connect_btn.set_sensitive(False) self.rename_btn.set_sensitive(False) self.convert_btn.set_sensitive(False) self.info_btn.set_sensitive(False) self.close_btn.set_sensitive(False) self.rcs_btn.set_sensitive(False) self.repair_btn.set_sensitive(False) self.remove_btn.set_sensitive(False) return path = self.model.get_path(node) if path is None: return is_rev = len(path.get_indices()) > 1 self.rcs_btn.set_label(RCS_BUTTON[is_rev]) if store.get_value(node, ICON_COL) == 'document-open': self.close_btn.set_sensitive(True) self.convert_btn.set_sensitive(False) self.connect_btn.set_sensitive(False) if _RCS_FOUND: self.rcs_btn.set_sensitive(True) elif store.get_value(node, BACKEND_COL) == UNAVAILABLE: self.close_btn.set_sensitive(False) self.convert_btn.set_sensitive(False) self.connect_btn.set_sensitive(False) self.rcs_btn.set_sensitive(False) self.repair_btn.set_sensitive(False) else: self.close_btn.set_sensitive(False) dbid = config.get('database.backend') backend_type = self.get_backend_name_from_dbid(dbid) if (store.get_value(node, ICON_COL) in [None, ""] and store.get_value(node, BACKEND_COL) != backend_type): self.convert_btn.set_sensitive(True) else: self.convert_btn.set_sensitive(False) self.connect_btn.set_sensitive(not is_rev) if _RCS_FOUND and is_rev: self.rcs_btn.set_sensitive(True) else: self.rcs_btn.set_sensitive(False) if store.get_value(node, ICON_COL) == 'dialog-error': path = store.get_value(node, PATH_COL) backup = os.path.join(path, "person.gbkp") self.repair_btn.set_sensitive(os.path.isfile(backup)) else: self.repair_btn.set_sensitive(False) self.rename_btn.set_sensitive(True) self.info_btn.set_sensitive(True) self.remove_btn.set_sensitive(True) self.new_btn.set_sensitive(True) def __build_interface(self): """ Builds the columns for the TreeView. The columns are: Icon, Database Name, Last Modified, Backend Type The Icon column gets its data from column 6 of the database model. It is expecting either None, or a GTK stock icon name The Database Name column is an editable column. We connect to the 'edited' signal, so that we can change the name when the user changes the column. The last accessed column simply displays the last time famtree was opened. The Backend Type column is a string based on database backend. 
""" # Put some help on the buttons: dbid = config.get('database.backend') backend_type = self.get_backend_name_from_dbid(dbid) if backend_type == UNAVAILABLE: dbid = 'sqlite' config.set('database.backend', dbid) backend_type = self.get_backend_name_from_dbid(dbid) self.new_btn.set_tooltip_text(backend_type) # build the database name column render = Gtk.CellRendererText() render.set_property('ellipsize', Pango.EllipsizeMode.END) render.connect('edited', self.__change_name) render.connect('editing-canceled', self.__stop_edit) render.connect('editing-started', self.__start_edit) self.column = Gtk.TreeViewColumn(_('Family Tree name'), render, text=NAME_COL) self.column.set_sort_column_id(NAME_COL) self.column.set_sort_indicator(True) self.column.set_resizable(True) self.column.set_min_width(250) self.dblist.append_column(self.column) self.name_renderer = render # build the icon column render = Gtk.CellRendererPixbuf() #icon_column = Gtk.TreeViewColumn(_('Status'), render, #icon_name=ICON_COL) icon_column = Gtk.TreeViewColumn(_('Status'), render) icon_column.set_cell_data_func(render, bug_fix) icon_column.set_sort_column_id(ICON_COL) self.dblist.append_column(icon_column) # build the backend column render = Gtk.CellRendererText() column = Gtk.TreeViewColumn(_('Database Type'), render, text=BACKEND_COL) column.set_sort_column_id(BACKEND_COL) column.set_sort_indicator(True) column.set_resizable(True) self.dblist.append_column(column) # build the last accessed column render = Gtk.CellRendererText() column = Gtk.TreeViewColumn(_('Last accessed'), render, text=DATE_COL) column.set_sort_column_id(DSORT_COL) self.dblist.append_column(column) def __populate(self): """ Builds the data and the display model. """ self._populate_cli() self._populate_model() def _populate_model(self): """ Builds the display model. """ self.model = Gtk.TreeStore(str, str, str, str, int, bool, str, str) #use current names to set up the model self._current_node = None last_accessed_node = None last_accessed = 0 for items in self.current_names: data = list(items[:8]) backend_type = self.get_backend_name_from_dbid(data[BACKEND_COL]) node = self.model.append(None, data[:-1] + [backend_type]) # For already loaded database, set current_node: if self.dbstate.is_open() and \ self.dbstate.db.get_save_path() == data[1]: self._current_node = node if data[DSORT_COL] > last_accessed: last_accessed = data[DSORT_COL] last_accessed_node = node for rdata in find_revisions(os.path.join(items[1], ARCHIVE_V)): data = [rdata[2], rdata[0], items[1], rdata[1], 0, False, "", backend_type] self.model.append(node, data) if self._current_node is None: self._current_node = last_accessed_node self.model.set_sort_column_id(NAME_COL, Gtk.SortType.ASCENDING) self.dblist.set_model(self.model) def existing_name(self, name, skippath=None): """ Return true if a name is present in the model already. 
If skippath given, the name of skippath is not considered """ iter = self.model.get_iter_first() while iter: path = self.model.get_path(iter) if path == skippath: pass else: itername = self.model.get_value(iter, NAME_COL) if itername.strip() == name.strip(): return True iter = self.model.iter_next(iter) return False def run(self): """ Runs the dialog, returning None if nothing has been chosen, or the path and name if something has been selected """ self.show() self.__update_buttons(self.selection) while True: value = self.top.run() if value == Gtk.ResponseType.OK: store, node = self.selection.get_selected() # don't open a locked file if store.get_value(node, ICON_COL) == 'gramps-lock': self.__ask_to_break_lock(store, node) continue # don't open a version if len(store.get_path(node).get_indices()) > 1: continue if node: del self.selection del self.name_renderer self.close() path = store.get_value(node, PATH_COL) return (path, store.get_value(node, NAME_COL)) else: del self.selection del self.name_renderer if value != Gtk.ResponseType.DELETE_EVENT: self.close() return None def __ask_to_break_lock(self, store, node): """ Prompts the user for permission to break the lock file that another process has set on the file. """ path = store.get_path(node) self.lock_file = store[path][PATH_COL] QuestionDialog( _("Break the lock on the '%s' database?") % store[path][0], _("Gramps believes that someone else is actively editing " "this database. You cannot edit this database while it " "is locked. If no one is editing the database you may " "safely break the lock. However, if someone else is editing " "the database and you break the lock, you may corrupt the " "database."), _("Break lock"), self.__really_break_lock, parent=self.top) def __really_break_lock(self): """ Deletes the lock file associated with the selected database, then updates the display appropriately. """ try: self.break_lock(self.lock_file) store, node = self.selection.get_selected() dbpath = store.get_value(node, PATH_COL) (tval, last) = time_val(dbpath) store.set_value(node, OPEN_COL, 0) store.set_value(node, ICON_COL, "") # see bug_fix store.set_value(node, DATE_COL, last) store.set_value(node, DSORT_COL, tval) except IOError: return def __stop_edit(self, *args): self.name_renderer.set_property('editable', False) self.__update_buttons(self.selection) def __start_edit(self, *args): """ Do not allow to click Load while changing name, to force users to finish the action of renaming. Hack around the fact that clicking button sends a 'editing-canceled' signal loosing the new name """ self.connect_btn.set_sensitive(False) self.rename_btn.set_sensitive(False) self.convert_btn.set_sensitive(False) self.info_btn.set_sensitive(False) self.rcs_btn.set_sensitive(False) self.repair_btn.set_sensitive(False) self.remove_btn.set_sensitive(False) self.new_btn.set_sensitive(False) def __change_name(self, renderer_sel, path, new_text): """ Change the name of the database. This is a callback from the column, which has been marked as editable. If the new string is empty, do nothing. Otherwise, renaming the database is simply changing the contents of the name file. """ # kill special characters so can use as file name in backup. new_text = re.sub(r"[':<>|,;=\"\[\]\.\+\*\/\?\\]", "_", new_text) #path is a string, convert to TreePath first path = Gtk.TreePath(path=path) if len(new_text) > 0: node = self.model.get_iter(path) old_text = self.model.get_value(node, NAME_COL) if self.model.get_value(node, ICON_COL) == 'document-open': # this database is loaded. 
We must change the title # in case we change the name several times before quitting, # we save the first old name. if self.before_change == "": self.before_change = old_text self.after_change = new_text if not old_text.strip() == new_text.strip(): if len(path.get_indices()) > 1: self.__rename_revision(path, new_text) else: self.__rename_database(path, new_text) self.name_renderer.set_property('editable', False) self.__update_buttons(self.selection) def __rename_revision(self, path, new_text): """ Renames the RCS revision using the rcs command. The rcs command is in the format of: rcs -mREV:NEW_NAME archive """ node = self.model.get_iter(path) db_dir = self.model.get_value(node, FILE_COL) rev = self.model.get_value(node, PATH_COL) archive = os.path.join(db_dir, ARCHIVE_V) cmd = ["rcs", "-x,v", "-m%s:%s" % (rev, new_text), archive] proc = subprocess.Popen(cmd, stderr=subprocess.PIPE) status = proc.wait() message = "\n".join(proc.stderr.readlines()) proc.stderr.close() del proc if status != 0: ErrorDialog(_("Rename failed"), _("An attempt to rename a version failed " "with the following message:\n\n%s") % message, parent=self.top) else: self.model.set_value(node, NAME_COL, new_text) #scroll to new position store, node = self.selection.get_selected() tree_path = store.get_path(node) self.dblist.scroll_to_cell(tree_path, None, False, 0.5, 0.5) def __rename_database(self, path, new_text): """ Renames the database by writing the new value to the name.txt file """ new_text = new_text.strip() node = self.model.get_iter(path) filename = self.model.get_value(node, FILE_COL) if self.existing_name(new_text, skippath=path): ErrorDialog(_("Could not rename the Family Tree."), _("Family Tree already exists, choose a unique name."), parent=self.top) return old_text, new_text = self.rename_database(filename, new_text) if old_text is not None: rename_filename(old_text, new_text) self.model.set_value(node, NAME_COL, new_text) #scroll to new position store, node = self.selection.get_selected() tree_path = store.get_path(node) self.dblist.scroll_to_cell(tree_path, None, False, 0.5, 0.5) def __rcs(self, obj): """ Callback for the RCS button. If the tree path is > 1, then we are on an RCS revision, in which case we can check out. If not, then we can only check in. """ store, node = self.selection.get_selected() tree_path = store.get_path(node) if len(tree_path.get_indices()) > 1: parent_node = store.get_iter((tree_path[0],)) parent_name = store.get_value(parent_node, NAME_COL) name = store.get_value(node, NAME_COL) revision = store.get_value(node, PATH_COL) db_path = store.get_value(node, FILE_COL) self.__checkout_copy(parent_name, name, revision, db_path) else: base_path = self.dbstate.db.get_save_path() archive = os.path.join(base_path, ARCHIVE) _check_in(self.dbstate.db, archive, self.user, self.__start_cursor, parent=self.window) self.__end_cursor() self.__populate() self._select_default() def __checkout_copy(self, parent_name, name, revision, db_path): """ Create a new database, then extracts a revision from RCS and imports it into the db """ dbid = config.get('database.backend') new_path, newname = self._create_new_db("%s : %s" % (parent_name, name), dbid=dbid) self.__start_cursor(_("Extracting archive...")) dbase = make_database(dbid) dbase.load(new_path) self.__start_cursor(_("Importing archive...")) check_out(dbase, revision, db_path, self.user) self.__end_cursor() dbase.close(user=self.user) def __remove_db(self, obj): """ Callback associated with the Remove button. 
Get the selected row and data, then call the verification dialog. """ store, node = self.selection.get_selected() path = store.get_path(node) self.data_to_delete = store[path] if len(path.get_indices()) == 1: QuestionDialog( _("Remove the '%s' Family Tree?") % self.data_to_delete[0], _("Removing this Family Tree will permanently destroy " "the data."), _("Remove Family Tree"), self.__really_delete_db, parent=self.top) else: rev = self.data_to_delete[0] parent = store[(path[0],)][0] QuestionDialog(_("Remove the '%(revision)s' version " "of '%(database)s'" ) % {'revision' : rev, 'database' : parent}, _("Removing this version will prevent you from " "extracting it in the future."), _("Remove version"), self.__really_delete_version, parent=self.top) def __really_delete_db(self): """ Delete the selected database. If the database is open, close it first. Then scan the database directory, deleting the files, and finally removing the directory. """ # close the database if the user has requested to delete the # active database if self.data_to_delete[PATH_COL] == self.active: self.uistate.viewmanager.close_database() store, node = self.selection.get_selected() path = store.get_path(node) node = self.model.get_iter(path) filename = self.model.get_value(node, FILE_COL) try: with open(filename, "r", encoding='utf-8') as name_file: file_name_to_delete = name_file.read() remove_filename(file_name_to_delete) directory = self.data_to_delete[1] for (top, dirs, files) in os.walk(directory): for filename in files: os.unlink(os.path.join(top, filename)) os.rmdir(directory) except (IOError, OSError) as msg: ErrorDialog(_("Could not delete Family Tree"), str(msg), parent=self.top) # rebuild the display self.__populate() self._select_default() def __really_delete_version(self): """ Delete the selected database. If the database is open, close it first. Then scan the database directory, deleting the files, and finally removing the directory. """ db_dir = self.data_to_delete[FILE_COL] rev = self.data_to_delete[PATH_COL] archive = os.path.join(db_dir, ARCHIVE_V) cmd = ["rcs", "-x,v", "-o%s" % rev, "-q", archive] proc = subprocess.Popen(cmd, stderr=subprocess.PIPE) status = proc.wait() message = "\n".join(proc.stderr.readlines()) proc.stderr.close() del proc if status != 0: ErrorDialog(_("Deletion failed"), _("An attempt to delete a version failed " "with the following message:\n\n%s") % message, parent=self.top) # rebuild the display self.__populate() self._select_default() def __convert_db_ask(self, obj): """ Ask to convert a closed family tree into the default database backend. """ store, node = self.selection.get_selected() name = store[node][0] dirname = store[node][1] dbid = config.get('database.backend') backend_type = self.get_backend_name_from_dbid(dbid) QuestionDialog( _("Convert the '%s' database?") % name, _("Do you wish to convert this family tree into a " "%(database_type)s database?") % {'database_type': backend_type}, _("Convert"), lambda: self.__convert_db(name, dirname), parent=self.top) def __convert_db(self, name, dirname): """ Actually convert the family tree into the default database backend. """ try: db = open_database(name) except: ErrorDialog(_("Opening the '%s' database") % name, _("An attempt to convert the database failed. 
" "Perhaps it needs updating."), parent=self.top) return plugin_manager = GuiPluginManager.get_instance() export_function = None for plugin in plugin_manager.get_export_plugins(): if plugin.get_extension() == "gramps": export_function = plugin.get_export_function() break ## Next, get an XML dump: if export_function is None: ErrorDialog(_("Converting the '%s' database") % name, _("An attempt to export the database failed."), parent=self.top) db.close(user=self.user) return self.__start_cursor(_("Converting data...")) xml_file = os.path.join(dirname, "backup.gramps") export_function(db, xml_file, self.user) db.close(user=self.user) count = 1 new_text = "%s %s" % (name, _("(Converted #%d)") % count) while self.existing_name(new_text): count += 1 new_text = "%s %s" % (name, _("(Converted #%d)") % count) dbid = config.get('database.backend') new_path, newname = self._create_new_db(new_text, dbid=dbid, edit_entry=False) ## Create a new database of correct type: dbase = make_database(dbid) dbase.load(new_path) ## import from XML import_function = None for plugin in plugin_manager.get_import_plugins(): if plugin.get_extension() == "gramps": import_function = plugin.get_import_function() if import_function is None: ErrorDialog(_("Converting the '%s' database") % name, _("An attempt to import into the database failed."), parent=self.top) else: import_function(dbase, xml_file, self.user) self.__end_cursor() dbase.close(user=self.user) self.__populate() self._select_default() def __rename_db(self, obj): """ Start the rename process by calling the start_editing option on the line with the cursor. """ store, node = self.selection.get_selected() path = self.model.get_path(node) self.name_renderer.set_property('editable', True) self.dblist.set_cursor(path, self.column, True) def __close_db(self, obj): """ Close the database. Set the displayed line correctly, set the dbstate to no_database, update the sensitivity of the buttons in this dialogue box and get viewmanager to manage the main window and plugable views. """ store, node = self.selection.get_selected() dbpath = store.get_value(node, PATH_COL) (tval, last) = time_val(dbpath) store.set_value(node, OPEN_COL, 0) store.set_value(node, ICON_COL, "") # see bug_fix store.set_value(node, DATE_COL, last) store.set_value(node, DSORT_COL, tval) self.dbstate.no_database() self.__update_buttons(self.selection) self.viewmanager.post_close_db() def __info_db(self, obj): """ Show info on this database. """ store, node = self.selection.get_selected() name = store[node][0] dirname = store[node][1] # if this is open, get info from there, otherwise, temp open? summary = self.get_dbdir_summary(dirname, name) Information(self.uistate, summary, track=self.track) def __repair_db(self, obj): """ Start the repair process by calling the start_editing option on the line with the cursor. """ store, node = self.selection.get_selected() dirname = store[node][1] #First ask user if he is really sure :-) yes_no = QuestionDialog2( _("Repair Family Tree?"), _("If you click %(bold_start)sProceed%(bold_end)s, Gramps will " "attempt to recover your Family Tree from the last good " "backup. 
There are several ways this can cause unwanted " "effects, so %(bold_start)sbackup%(bold_end)s the " "Family Tree first.\nThe Family Tree you have selected " "is stored in %(dirname)s.\n\n" "Before doing a repair, verify that the Family Tree can " "really no longer be opened, as the database back-end can " "recover from some errors automatically.\n\n" "%(bold_start)sDetails:%(bold_end)s Repairing a Family Tree " "actually uses the last backup of the Family Tree, which " "Gramps stored on last use. If you have worked for " "several hours/days without closing Gramps, then all " "this information will be lost! If the repair fails, then " "the original Family Tree will be lost forever, hence " "a backup is needed. If the repair fails, or too much " "information is lost, you can fix the original " "Family Tree manually. For details, see the webpage\n" "%(gramps_wiki_recover_url)s\n" "Before doing a repair, try to open the Family Tree " "in the normal manner. Several errors that trigger the " "repair button can be fixed automatically. " "If this is the case, you can disable the repair button " "by removing the file %(recover_file)s in the " "Family Tree directory." ) % {'bold_start': '<b>', 'bold_end': '</b>', 'recover_file': '<i>need_recover</i>', 'gramps_wiki_recover_url': URL_WIKISTRING + 'Recover_corrupted_family_tree', 'dirname': dirname}, _("Proceed, I have taken a backup"), _("Stop"), parent=self.top) prompt = yes_no.run() if not prompt: return opened = store[node][OPEN_COL] if opened: self.dbstate.no_database() # delete files that are not backup files or the .txt file for filename in os.listdir(dirname): if os.path.splitext(filename)[1] not in (".gbkp", ".txt"): fname = os.path.join(dirname, filename) os.unlink(fname) dbase = make_database("sqlite") dbase.load(dirname, None) self.__start_cursor(_("Rebuilding database from backup files")) try: dbase.restore() except DbException as msg: ErrorDialog(_("Error restoring backup data"), msg, parent=self.top) self.__end_cursor() dbase.close(user=self.user) self.dbstate.no_database() self.__populate() self._select_default() def __start_cursor(self, msg): """ Set the cursor to the busy state, and displays the associated message """ self.msg.set_label(msg) self.top.get_window().set_cursor(self.BUSY_CURSOR) while Gtk.events_pending(): Gtk.main_iteration() def __end_cursor(self): """ Set the cursor back to normal and clears the message """ self.top.get_window().set_cursor(None) self.msg.set_label("") def __new_db(self, obj): """ Callback wrapper around the actual routine that creates the new database. Catch OSError and IOError and display a warning message. 
""" self.new_btn.set_sensitive(False) dbid = config.get('database.backend') if dbid: try: self._create_new_db(dbid=dbid) except (OSError, IOError) as msg: ErrorDialog(_("Could not create Family Tree"), str(msg), parent=self.top) self.new_btn.set_sensitive(True) def _create_new_db(self, title=None, create_db=True, dbid=None, edit_entry=True): """ Create a new database, append to model """ new_path, title = self.create_new_db_cli(title, create_db, dbid) path_name = os.path.join(new_path, NAME_FILE) (tval, last) = time_val(new_path) backend_type = self.get_backend_name_from_dbid(dbid) node = self.model.append(None, [title, new_path, path_name, last, tval, False, '', backend_type]) self.selection.select_iter(node) path = self.model.get_path(node) if edit_entry: self.name_renderer.set_property('editable', True) self.dblist.set_cursor(path, self.column, True) return new_path, title def __drag_data_received(self, widget, context, xpos, ypos, selection, info, rtime): """ Handle the reception of drag data """ drag_value = selection.get_data().decode() fname = None type = None title = None # Allow any type of URL ("file://", "http://", etc): if drag_value and urlparse(drag_value).scheme != "": fname, title = [], [] for treename in [v.strip() for v in drag_value.split("\n") if v.strip() != '']: f, t = self.import_new_db(treename, self.user) fname.append(f) title.append(t) return fname, title def drag_motion(wid, context, xpos, ypos, time_stamp): """ DND callback that is called on a DND drag motion begin """ Gdk.drag_status(context, Gdk.DragAction.COPY, time_stamp) return True def drop_cb(wid, context, xpos, ypos, time_stamp): """ DND callback that finishes the DND operation """ Gtk.drag_finish(context, True, False, time_stamp) return True def find_revisions(name): """ Finds all the revisions of the specified RCS archive. """ import re rev = re.compile(r"\s*revision\s+([\d\.]+)") date = re.compile(r"date:\s+(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d)[-+]\d\d;") if not os.path.isfile(name) or not _RCS_FOUND: return [] rlog = ["rlog", "-x,v", "-zLT", name] proc = subprocess.Popen(rlog, stdout=subprocess.PIPE) proc.wait() revlist = [] date_str = "" rev_str = "" com_str = "" get_next = False if os.path.isfile(name): for line in proc.stdout: if not isinstance(line, str): # we assume utf-8 ... line = line.decode('utf-8') match = rev.match(line) if match: rev_str = copy.copy(match.groups()[0]) continue match = date.match(line) if match: date_str = time.strftime( '%x %X', time.strptime(match.groups()[0], '%Y-%m-%d %H:%M:%S')) get_next = True continue if get_next: get_next = False com_str = line.strip() revlist.append((rev_str, date_str, com_str)) proc.stdout.close() del proc return revlist def check_out(dbase, rev, path, user): """ Checks out the revision from rcs, and loads the resulting XML file into the database. 
""" co_cmd = ["co", "-x,v", "-q%s" % rev] + [os.path.join(path, ARCHIVE), os.path.join(path, ARCHIVE_V)] proc = subprocess.Popen(co_cmd, stderr=subprocess.PIPE) status = proc.wait() message = "\n".join(proc.stderr.readlines()) proc.stderr.close() del proc if status != 0: user.notify_error( _("Retrieve failed"), _("An attempt to retrieve the data failed " "with the following message:\n\n%s") % message ) return pmgr = GuiPluginManager.get_instance() for plugin in pmgr.get_import_plugins(): if plugin.get_extension() == "gramps": rdr = plugin.get_import_function() xml_file = os.path.join(path, ARCHIVE) rdr(dbase, xml_file, user) os.unlink(xml_file) def _check_in(dbase, filename, user, cursor_func=None, parent=None): """ Checks in the specified file into RCS """ init = ["rcs", '-x,v', '-i', '-U', '-q', '-t-"Gramps database"'] ci_cmd = ["ci", '-x,v', "-q", "-f"] archive_name = filename + ",v" glade = Glade(toplevel='comment') top = glade.toplevel text = glade.get_object('description') top.set_transient_for(parent) top.run() comment = text.get_text() top.destroy() if not os.path.isfile(archive_name): cmd = init + [archive_name] proc = subprocess.Popen(cmd, stderr=subprocess.PIPE) status = proc.wait() message = "\n".join(proc.stderr.readlines()) proc.stderr.close() del proc if status != 0: ErrorDialog(_("Archiving failed"), _("An attempt to create the archive failed " "with the following message:\n\n%s") % message, parent=self.top) if cursor_func: cursor_func(_("Creating data to be archived...")) plugin_manager = GuiPluginManager.get_instance() for plugin in plugin_manager.get_export_plugins(): if plugin.get_extension() == "gramps": export_function = plugin.get_export_function() export_function(dbase, filename, user) if cursor_func: cursor_func(_("Saving archive...")) cmd = ci_cmd + ['-m%s' % comment, filename, archive_name] proc = subprocess.Popen(cmd, stderr=subprocess.PIPE) status = proc.wait() message = "\n".join(proc.stderr.readlines()) proc.stderr.close() del proc if status != 0: ErrorDialog(_("Archiving failed"), _("An attempt to archive the data failed " "with the following message:\n\n%s") % message, parent=self.top) def bug_fix(column, renderer, model, iter_, data): """ Cell data function to set the status column. There is a bug in pygobject which prevents us from setting a value to None using the TreeModel set_value method. Instead we set it to an empty string and convert it to None here. """ icon_name = model.get_value(iter_, ICON_COL) if icon_name == '': icon_name = None renderer.set_property('icon-name', icon_name)
SNoiraud/gramps
gramps/gui/dbman.py
Python
gpl-2.0
45,607
[ "Brian" ]
a02eb47e95f326009efb7923bfd1d8d3fe9a9ac6d435852d971a8c09ec0c7a3f
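A self-contained sketch of two pure-Python pieces of the DbManager record above: the character scrubbing applied before a Family Tree is renamed, and the regular expressions find_revisions uses to read rlog output. The rlog text is a canned fragment and the helper names are illustrative, so no RCS installation or Gramps session is needed.

import re

def sanitize_tree_name(new_text):
    # Same character class __change_name uses so the name is safe in backup file names.
    return re.sub(r"[':<>|,;=\"\[\]\.\+\*\/\?\\]", "_", new_text)

print(sanitize_tree_name('Smith/Jones: "2021" tree'))   # Smith_Jones_ _2021_ tree

rev_re = re.compile(r"\s*revision\s+([\d\.]+)")
date_re = re.compile(r"date:\s+(\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d)[-+]\d\d;")

rlog_fragment = """\
revision 1.2
date: 2021-03-01 10:15:00+00;  author: me;  state: Exp;
nightly archive
revision 1.1
date: 2021-02-01 09:00:00+00;  author: me;  state: Exp;
initial check-in
"""

revisions = []
rev = date = None
expect_comment = False
for line in rlog_fragment.splitlines():
    match = rev_re.match(line)
    if match:
        rev = match.group(1)
        continue
    match = date_re.match(line)
    if match:
        date = match.group(1)
        expect_comment = True
        continue
    if expect_comment:   # the line after the date line carries the revision comment
        revisions.append((rev, date, line.strip()))
        expect_comment = False

print(revisions)
# [('1.2', '2021-03-01 10:15:00', 'nightly archive'),
#  ('1.1', '2021-02-01 09:00:00', 'initial check-in')]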
#!/usr/bin/env python """ This script rotates circular sequences and makes them to start from specific positions in output FASTA files. It also circularises every sequence and save it in a GFA file if a filename is given to argument --gfa/-g. This script is useful for improving complete genome assemblies as well as read simulation. Command: python rotateSeq.py -i input.fna -t new_starts.tsv -f output.fna -g output.gfa 2> messages.err python rotateSeq.py -i input.fna -t new_starts.tsv -f output.fna && gzip output.fna New start positions are specified in a three-column, tab-delimited, header-free table (parameter '-t'): 'sequence ID'\t'Position'\t'Orientation (+/-)'. No change is applied to input sequences whose sequence IDs are not listed in this table (e.g., when some sequences are linear or incomplete). Dependencies: Python 3, BioPython 1.78+. Copyright (C) 2021 Yu Wan <wanyuac@126.com> Licensed under the GNU General Public Licence version 3 (GPLv3) <https://www.gnu.org/licenses/>. Creation: 17 June 2021; the latest update: 9 January 2022 """ import os import sys from Bio import SeqIO from Bio.Seq import Seq # Bio.Alphabet has been removed from BioPython from v1.78. See https://biopython.org/wiki/Alphabet. from argparse import ArgumentParser from collections import namedtuple def parse_argument(): parser = ArgumentParser(description = "Restart circular sequences from given positions") parser.add_argument('-i', '--input', dest = 'i', type = str, required = True, help = "An input FASTA file") parser.add_argument('-t', '--table', dest = 't', type = str, required = True, \ help = "A tab-delimited, header-free table of three columns: sequence ID, the base to be used as the first base of the new sequence, and the orientation (+/-) of the new sequence") parser.add_argument('-f', '--fasta', dest = 'f', type = str, required = False, default = 'rotated_seq.fna', help = "Output FASTA file") parser.add_argument('-g', '--gfa', dest = 'g', required = False, default = None, help = "Output GFA file, assuming all contigs are circular") return parser.parse_args() def main(): args = parse_argument() prev_seqs = import_seqs(args.i) pos_spec = import_positions(args.t) fasta_out = open(file = args.f, mode = 'w', encoding = 'utf-8') make_gfa = args.g != None if make_gfa: gfa_out = open(file = args.g, mode = 'w', encoding = 'utf-8') gfa_out.write("H\tVN:Z:1.0\n") # The GFA header for i, contig in prev_seqs.items(): if i in pos_spec.keys(): p = pos_spec[i] # 'p' is a namedtuple 'Pos' defined by function import_positions. if p.ori == '+': # Simpler case: start from the chosen base and then go clockwise to create the new sequence. if p.base > 0: # Convert the p-th position into Python's character index. Note that no change will be carried out by Python if p > len(contig.seq). s = p.base - 1 # Index of the chosen base (start base) in the sequence (a string). print("Restart sequence %s from base %i and go clockwise to create the new sequence." % (i, p.base), file = sys.stderr) seq = str(contig.seq) contig.seq = Seq(seq[s : ] + seq[ : s]) # Rotate the current sequence; no change when p.base = 0. "generic_dna" is no longer needed from BioPython v1.78. else: print("Warning: position %i for sequence %s cannot be negative. No change will be applied to this sequence." % (p.base, i), file = sys.stderr) else: if p.base > 0: s = p.base # The calculation of the index is: s = (p.base + 1) - 1, where 'p.base + 1' is the new start base required for creating the correct sequence that goes counterclockwise. 
print("Restart sequence %s from base %i and go counterclockwise to create the new sequence." % (i, p.base), file = sys.stderr) seq = str(contig.seq) contig.seq = Seq(seq[s : ] + seq[ : s]).reverse_complement() # Rotate and then take the reverse complement (return value: a new Seq object) else: print("Warning: position %i for sequence %s cannot be negative. No change will be applied to this sequence." % (p.base, i), file = sys.stderr) else: print("Warning: sequence " + i + " is not found in the position table. No change will be applied to this sequence.", file = sys.stderr) SeqIO.write(contig, fasta_out, "fasta") if make_gfa: gfa_out.write(fasta2gfa(i, contig)) fasta_out.close() if make_gfa: gfa_out.close() return def import_seqs(fasta): """ Returns a dictionary of SeqIO.SeqRecord objects {seq_id : SeqRecord}. """ check_file(fasta) seqs = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta')) return seqs def import_positions(tsv): """ Returns a dictionary of namedtuples: {seq_id : Pos(base, ori)}. base: he base to be used as the first base of the new sequence; ori: to go clockwise (+) or counterclockwise (-) from the chosen base to construct the new sequence. """ check_file(tsv) Pos = namedtuple('Pos', ['base', 'ori']) with open(tsv, 'r') as f: pos = dict() # {seq_id : Pos} lines = f.read().splitlines() for line in lines: if line != '': i, p, d = line.split('\t') pos[i] = Pos(base = int(p), ori = d) return(pos) def fasta2gfa(i, contig): """ Makes GFA-formatted lines from an input contig sequence """ s = str(contig.seq) bp = str(len(s)) lines = '\t'.join(['S', i, s, 'LN:i:' + bp, 'RC:i:' + bp]) + '\n' # Arbitrarily assign a (relative) fold coverage of one to each sequence. lines += '\t'.join(['L', i, '+', i, '+', '0M']) + '\n' return lines def check_file(f): if not os.path.exists(f): print("Argument error: file " + f + " is not accessible.", file = sys.stderr) sys.exit(1) return if __name__ == '__main__': main()
wanyuac/BINF_toolkit
rotateSeq.py
Python
gpl-3.0
5,654
[ "Biopython" ]
06d3972b5761b4a4b109a59fe0ab89205440347f5e7005cdf062d1c2dfb8824e
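The heart of the script above is a string rotation plus an optional reverse complement on a Biopython Seq; this sketch isolates that operation (the rotate function name and the test sequence are illustrative) so the 1-based coordinate convention and the '-' orientation branch are easier to see.

from Bio.Seq import Seq

def rotate(seq, base, ori='+'):
    """Restart a circular sequence at 1-based position `base`; ori='-' also
    takes the reverse complement, mirroring the '-' branch of main() above."""
    s = str(seq)
    if ori == '+':
        i = base - 1                  # the chosen base becomes the first base
        return Seq(s[i:] + s[:i])
    i = base                          # '-' strand: split just after the chosen base...
    return Seq(s[i:] + s[:i]).reverse_complement()   # ...then reverse-complement

circular = Seq("ACGTTTGCA")
print(rotate(circular, 4, '+'))   # TTTGCAACG -- starts at base 4, going clockwise
print(rotate(circular, 4, '-'))   # ACGTTGCAA -- starts at the complement of base 4, counterclockwise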
import os import errno import codecs from tao.settings import MODULE_INDICES from tao.xml_util import xml_parse def create_file(dir_path, filename, filenames_to_contents): file_path = os.path.join(dir_path, filename) mkdir_p(os.path.dirname(file_path)) with codecs.open(file_path, 'w', encoding='utf-8') as f: f.write(filenames_to_contents[filename]) def get_file_size(dir_path, file_name): file_path = os.path.join(dir_path, file_name) size = os.path.getsize(file_path) units = ['B', 'kB', 'MB'] for x in units: if size < 1000: return '%3d%s' % (round(size), x) size /= 1000 return '%3.1f%s' % (size, 'GB') def mkdir_p(path): try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def write_file_from_zip(zipfile_obj, filename, fullpath): with open(fullpath, 'wb') as outfile: outfile.write(zipfile_obj.read(filename)) def make_form(defaults, form_class, values, prefix=None, ui_holder=None): if prefix in defaults: default_values = defaults[prefix].copy() else: default_values = {} default_values.update(values) return form_class(ui_holder, dict([(prefix + '-'+ k,v) for k,v in default_values.iteritems()]), prefix=prefix) def make_form_xml(form_class, xml_str, prefix=None, ui_holder=None): xml_root = xml_parse(xml_str) # print xml_root return form_class.from_xml(ui_holder, xml_root, prefix=prefix) class MockUIHolder: """ Just a very simple mock of the UI Holder to make sure the RecordFilterForm works """ def __init__(self, **kwargs): self._forms = kwargs self._dataset = None def update(self, **kwargs): self._forms.update(kwargs) return self def is_bound(self, module_name): if module_name not in self._forms: raise Exception("I am mock!") return self._forms[module_name].is_bound def raw_data(self, module_name, var_name): if module_name not in self._forms: raise Exception(module_name + " not in self._forms") return self._forms[module_name].data[module_name + '-' + var_name] def cleaned_data(self, module_index, var_name): try: # module_index = int(float(MODULE_INDICES[module_name]))-1 return self._forms[module_index].cleaned_data[var_name] except KeyError: print module_index + " not valid" def cleaned_data(self, module_name, var_name): if module_name not in self._forms: raise Exception(module_name + " not in self._forms") return self._forms[module_name].cleaned_data[var_name] def forms(self): return [v for k,v in self._forms.items()] def set_forms(self, forms): self._forms = forms def get_dataset(self): """Answer the dataset referenced by the receiver (through the selected Dark Matter Simulation and Galaxy Model)""" if self._dataset is None: raise Exception("I am poorly configured mock without _dataset") return self._dataset def set_dataset(self, v): self._dataset = v dataset = property(get_dataset, set_dataset) class TaoModelsCleanUpMixin(object): def tearDown(self): m = __import__('tao.models') for name in ['Simulation', 'GalaxyModel', 'DataSet', 'DataSetProperty', 'StellarModel', 'DustModel', 'Snapshot', 'BandPassFilter', 'WorkflowCommand', 'Job', 'GlobalParameter', 'SurveyPreset']: klass = getattr(m.models, name) for obj in klass.objects.all(): obj.delete()
IntersectAustralia/asvo-tao
web/tao/tests/helper.py
Python
gpl-3.0
3,685
[ "Galaxy" ]
4661dfda659df89ddac51ca8fd8d02ecd20710fac8dd3a4ca2f60db563d578c3
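get_file_size in the helper module above combines a stat call with a small human-readable formatter; the adaptation below (renamed format_size, written for Python 3, fed literal byte counts) lifts just the formatting loop so the fall-through from B/kB/MB to GB is easy to exercise.

def format_size(size):
    # Same unit walk as get_file_size, but on a raw byte count instead of a file on disk.
    units = ['B', 'kB', 'MB']
    for unit in units:
        if size < 1000:
            return '%3d%s' % (round(size), unit)
        size /= 1000
    return '%3.1f%s' % (size, 'GB')

for n in (512, 3685, 45607, 12037000000):
    print(format_size(n))
# prints: 512B,   4kB,  46MB, 12.0GB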
from __future__ import division, absolute_import, print_function import warnings import numpy as np from numpy.testing import ( assert_, assert_raises, assert_equal, assert_warns, assert_no_warnings, assert_array_equal, assert_array_almost_equal, suppress_warnings ) from numpy import random import sys class TestSeed(object): def test_scalar(self): s = np.random.RandomState(0) assert_equal(s.randint(1000), 684) s = np.random.RandomState(4294967295) assert_equal(s.randint(1000), 419) def test_array(self): s = np.random.RandomState(range(10)) assert_equal(s.randint(1000), 468) s = np.random.RandomState(np.arange(10)) assert_equal(s.randint(1000), 468) s = np.random.RandomState([0]) assert_equal(s.randint(1000), 973) s = np.random.RandomState([4294967295]) assert_equal(s.randint(1000), 265) def test_invalid_scalar(self): # seed must be an unsigned 32 bit integer assert_raises(TypeError, np.random.RandomState, -0.5) assert_raises(ValueError, np.random.RandomState, -1) def test_invalid_array(self): # seed must be an unsigned 32 bit integer assert_raises(TypeError, np.random.RandomState, [-0.5]) assert_raises(ValueError, np.random.RandomState, [-1]) assert_raises(ValueError, np.random.RandomState, [4294967296]) assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296]) assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296]) def test_invalid_array_shape(self): # gh-9832 assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64)) assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]]) assert_raises(ValueError, np.random.RandomState, [[1, 2, 3], [4, 5, 6]]) class TestBinomial(object): def test_n_zero(self): # Tests the corner case of n == 0 for the binomial distribution. # binomial(0, p) should be zero for any p in [0, 1]. # This test addresses issue #3480. zeros = np.zeros(2, dtype='int') for p in [0, .5, 1]: assert_(random.binomial(0, p) == 0) assert_array_equal(random.binomial(zeros, p), zeros) def test_p_is_nan(self): # Issue #4571. assert_raises(ValueError, random.binomial, 1, np.nan) class TestMultinomial(object): def test_basic(self): random.multinomial(100, [0.2, 0.8]) def test_zero_probability(self): random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0]) def test_int_negative_interval(self): assert_(-5 <= random.randint(-5, -1) < -1) x = random.randint(-5, -1, 5) assert_(np.all(-5 <= x)) assert_(np.all(x < -1)) def test_size(self): # gh-3173 p = [0.5, 0.5] assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2)) assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2)) assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2)) assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape, (2, 2, 2)) assert_raises(TypeError, np.random.multinomial, 1, p, float(1)) class TestSetState(object): def setup(self): self.seed = 1234567890 self.prng = random.RandomState(self.seed) self.state = self.prng.get_state() def test_basic(self): old = self.prng.tomaxint(16) self.prng.set_state(self.state) new = self.prng.tomaxint(16) assert_(np.all(old == new)) def test_gaussian_reset(self): # Make sure the cached every-other-Gaussian is reset. 
old = self.prng.standard_normal(size=3) self.prng.set_state(self.state) new = self.prng.standard_normal(size=3) assert_(np.all(old == new)) def test_gaussian_reset_in_media_res(self): # When the state is saved with a cached Gaussian, make sure the # cached Gaussian is restored. self.prng.standard_normal() state = self.prng.get_state() old = self.prng.standard_normal(size=3) self.prng.set_state(state) new = self.prng.standard_normal(size=3) assert_(np.all(old == new)) def test_backwards_compatibility(self): # Make sure we can accept old state tuples that do not have the # cached Gaussian value. old_state = self.state[:-2] x1 = self.prng.standard_normal(size=16) self.prng.set_state(old_state) x2 = self.prng.standard_normal(size=16) self.prng.set_state(self.state) x3 = self.prng.standard_normal(size=16) assert_(np.all(x1 == x2)) assert_(np.all(x1 == x3)) def test_negative_binomial(self): # Ensure that the negative binomial results take floating point # arguments without truncation. self.prng.negative_binomial(0.5, 0.5) class TestRandint(object): rfunc = np.random.randint # valid integer/boolean types itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.int64, np.uint64] def test_unsupported_type(self): assert_raises(TypeError, self.rfunc, 1, dtype=float) def test_bounds_checking(self): for dt in self.itype: lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt) assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt) assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt) assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt) def test_rng_zero_and_extremes(self): for dt in self.itype: lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 tgt = ubnd - 1 assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = lbnd assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) tgt = (lbnd + ubnd)//2 assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt) def test_full_range(self): # Test for ticket #1690 for dt in self.itype: lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 try: self.rfunc(lbnd, ubnd, dtype=dt) except Exception as e: raise AssertionError("No error should have been raised, " "but one was with the following " "message:\n\n%s" % str(e)) def test_in_bounds_fuzz(self): # Don't use fixed seed np.random.seed() for dt in self.itype[1:]: for ubnd in [4, 8, 16]: vals = self.rfunc(2, ubnd, size=2**16, dtype=dt) assert_(vals.max() < ubnd) assert_(vals.min() >= 2) vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_) assert_(vals.max() < 2) assert_(vals.min() >= 0) def test_repeatability(self): import hashlib # We use a md5 hash of generated sequences of 1000 samples # in the range [0, 6) for all but bool, where the range # is [0, 2). Hashes are for little endian numbers. 
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0', 'int16': '1b7741b80964bb190c50d541dca1cac1', 'int32': '4dc9fcc2b395577ebb51793e58ed1a05', 'int64': '17db902806f448331b5a758d7d2ee672', 'int8': '27dd30c4e08a797063dffac2490b0be6', 'uint16': '1b7741b80964bb190c50d541dca1cac1', 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05', 'uint64': '17db902806f448331b5a758d7d2ee672', 'uint8': '27dd30c4e08a797063dffac2490b0be6'} for dt in self.itype[1:]: np.random.seed(1234) # view as little endian for hash if sys.byteorder == 'little': val = self.rfunc(0, 6, size=1000, dtype=dt) else: val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap() res = hashlib.md5(val.view(np.int8)).hexdigest() assert_(tgt[np.dtype(dt).name] == res) # bools do not depend on endianness np.random.seed(1234) val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8) res = hashlib.md5(val).hexdigest() assert_(tgt[np.dtype(bool).name] == res) def test_int64_uint64_corner_case(self): # When stored in Numpy arrays, `lbnd` is casted # as np.int64, and `ubnd` is casted as np.uint64. # Checking whether `lbnd` >= `ubnd` used to be # done solely via direct comparison, which is incorrect # because when Numpy tries to compare both numbers, # it casts both to np.float64 because there is # no integer superset of np.int64 and np.uint64. However, # `ubnd` is too large to be represented in np.float64, # causing it be round down to np.iinfo(np.int64).max, # leading to a ValueError because `lbnd` now equals # the new `ubnd`. dt = np.int64 tgt = np.iinfo(np.int64).max lbnd = np.int64(np.iinfo(np.int64).max) ubnd = np.uint64(np.iinfo(np.int64).max + 1) # None of these function calls should # generate a ValueError now. actual = np.random.randint(lbnd, ubnd, dtype=dt) assert_equal(actual, tgt) def test_respect_dtype_singleton(self): # See gh-7203 for dt in self.itype: lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1 sample = self.rfunc(lbnd, ubnd, dtype=dt) assert_equal(sample.dtype, np.dtype(dt)) for dt in (bool, int, np.compat.long): lbnd = 0 if dt is bool else np.iinfo(dt).min ubnd = 2 if dt is bool else np.iinfo(dt).max + 1 # gh-7284: Ensure that we get Python data types sample = self.rfunc(lbnd, ubnd, dtype=dt) assert_(not hasattr(sample, 'dtype')) assert_equal(type(sample), dt) class TestRandomDist(object): # Make sure the random distribution returns the correct value for a # given seed def setup(self): self.seed = 1234567890 def test_rand(self): np.random.seed(self.seed) actual = np.random.rand(3, 2) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randn(self): np.random.seed(self.seed) actual = np.random.randn(3, 2) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_randint(self): np.random.seed(self.seed) actual = np.random.randint(-99, 99, size=(3, 2)) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers(self): np.random.seed(self.seed) with suppress_warnings() as sup: w = sup.record(DeprecationWarning) actual = np.random.random_integers(-99, 99, size=(3, 2)) assert_(len(w) == 1) desired = np.array([[31, 3], [-52, 41], [-48, -66]]) assert_array_equal(actual, desired) def test_random_integers_max_int(self): # 
Tests whether random_integers can generate the # maximum allowed Python int that can be converted # into a C long. Previous implementations of this # method have thrown an OverflowError when attempting # to generate this integer. with suppress_warnings() as sup: w = sup.record(DeprecationWarning) actual = np.random.random_integers(np.iinfo('l').max, np.iinfo('l').max) assert_(len(w) == 1) desired = np.iinfo('l').max assert_equal(actual, desired) def test_random_integers_deprecated(self): with warnings.catch_warnings(): warnings.simplefilter("error", DeprecationWarning) # DeprecationWarning raised with high == None assert_raises(DeprecationWarning, np.random.random_integers, np.iinfo('l').max) # DeprecationWarning raised with high != None assert_raises(DeprecationWarning, np.random.random_integers, np.iinfo('l').max, np.iinfo('l').max) def test_random(self): np.random.seed(self.seed) actual = np.random.random((3, 2)) desired = np.array([[0.61879477158567997, 0.59162362775974664], [0.88868358904449662, 0.89165480011560816], [0.4575674820298663, 0.7781880808593471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_choice_uniform_replace(self): np.random.seed(self.seed) actual = np.random.choice(4, 4) desired = np.array([2, 3, 2, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_replace(self): np.random.seed(self.seed) actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1]) desired = np.array([1, 1, 2, 2]) assert_array_equal(actual, desired) def test_choice_uniform_noreplace(self): np.random.seed(self.seed) actual = np.random.choice(4, 3, replace=False) desired = np.array([0, 1, 3]) assert_array_equal(actual, desired) def test_choice_nonuniform_noreplace(self): np.random.seed(self.seed) actual = np.random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1]) desired = np.array([2, 3, 1]) assert_array_equal(actual, desired) def test_choice_noninteger(self): np.random.seed(self.seed) actual = np.random.choice(['a', 'b', 'c', 'd'], 4) desired = np.array(['c', 'd', 'c', 'd']) assert_array_equal(actual, desired) def test_choice_exceptions(self): sample = np.random.choice assert_raises(ValueError, sample, -1, 3) assert_raises(ValueError, sample, 3., 3) assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3) assert_raises(ValueError, sample, [], 3) assert_raises(ValueError, sample, [1, 2, 3, 4], 3, p=[[0.25, 0.25], [0.25, 0.25]]) assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2]) assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) # gh-13087 assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, p=[1, 0, 0]) def test_choice_return_shape(self): p = [0.1, 0.9] # Check scalar assert_(np.isscalar(np.random.choice(2, replace=True))) assert_(np.isscalar(np.random.choice(2, replace=False))) assert_(np.isscalar(np.random.choice(2, replace=True, p=p))) assert_(np.isscalar(np.random.choice(2, replace=False, p=p))) assert_(np.isscalar(np.random.choice([1, 2], replace=True))) assert_(np.random.choice([None], replace=True) is None) a = np.array([1, 2]) arr = np.empty(1, dtype=object) arr[0] = a assert_(np.random.choice(arr, replace=True) is a) # Check 0-d array s = tuple() assert_(not np.isscalar(np.random.choice(2, s, replace=True))) 
assert_(not np.isscalar(np.random.choice(2, s, replace=False))) assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p))) assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p))) assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True))) assert_(np.random.choice([None], s, replace=True).ndim == 0) a = np.array([1, 2]) arr = np.empty(1, dtype=object) arr[0] = a assert_(np.random.choice(arr, s, replace=True).item() is a) # Check multi dimensional array s = (2, 3) p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2] assert_equal(np.random.choice(6, s, replace=True).shape, s) assert_equal(np.random.choice(6, s, replace=False).shape, s) assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s) assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s) assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s) # Check zero-size assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4)) assert_equal(np.random.randint(0, -10, size=0).shape, (0,)) assert_equal(np.random.randint(10, 10, size=0).shape, (0,)) assert_equal(np.random.choice(0, size=0).shape, (0,)) assert_equal(np.random.choice([], size=(0,)).shape, (0,)) assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, (3, 0, 4)) assert_raises(ValueError, np.random.choice, [], 10) def test_choice_nan_probabilities(self): a = np.array([42, 1, 2]) p = [None, None, None] assert_raises(ValueError, np.random.choice, a, p=p) def test_bytes(self): np.random.seed(self.seed) actual = np.random.bytes(10) desired = b'\x82Ui\x9e\xff\x97+Wf\xa5' assert_equal(actual, desired) def test_shuffle(self): # Test lists, arrays (of various dtypes), and multidimensional versions # of both, c-contiguous or not: for conv in [lambda x: np.array([]), lambda x: x, lambda x: np.asarray(x).astype(np.int8), lambda x: np.asarray(x).astype(np.float32), lambda x: np.asarray(x).astype(np.complex64), lambda x: np.asarray(x).astype(object), lambda x: [(i, i) for i in x], lambda x: np.asarray([[i, i] for i in x]), lambda x: np.vstack([x, x]).T, # gh-11442 lambda x: (np.asarray([(i, i) for i in x], [("a", int), ("b", int)]) .view(np.recarray)), # gh-4270 lambda x: np.asarray([(i, i) for i in x], [("a", object), ("b", np.int32)])]: np.random.seed(self.seed) alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]) np.random.shuffle(alist) actual = alist desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3]) assert_array_equal(actual, desired) def test_shuffle_masked(self): # gh-3263 a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1) b = np.ma.masked_values(np.arange(20) % 3 - 1, -1) a_orig = a.copy() b_orig = b.copy() for i in range(50): np.random.shuffle(a) assert_equal( sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask])) np.random.shuffle(b) assert_equal( sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask])) def test_beta(self): np.random.seed(self.seed) actual = np.random.beta(.1, .9, size=(3, 2)) desired = np.array( [[1.45341850513746058e-02, 5.31297615662868145e-04], [1.85366619058432324e-06, 4.19214516800110563e-03], [1.58405155108498093e-04, 1.26252891949397652e-04]]) assert_array_almost_equal(actual, desired, decimal=15) def test_binomial(self): np.random.seed(self.seed) actual = np.random.binomial(100, .456, size=(3, 2)) desired = np.array([[37, 43], [42, 48], [46, 45]]) assert_array_equal(actual, desired) def test_chisquare(self): np.random.seed(self.seed) actual = np.random.chisquare(50, size=(3, 2)) desired = np.array([[63.87858175501090585, 68.68407748911370447], [65.77116116901505904, 
47.09686762438974483], [72.3828403199695174, 74.18408615260374006]]) assert_array_almost_equal(actual, desired, decimal=13) def test_dirichlet(self): np.random.seed(self.seed) alpha = np.array([51.72840233779265162, 39.74494232180943953]) actual = np.random.mtrand.dirichlet(alpha, size=(3, 2)) desired = np.array([[[0.54539444573611562, 0.45460555426388438], [0.62345816822039413, 0.37654183177960598]], [[0.55206000085785778, 0.44793999914214233], [0.58964023305154301, 0.41035976694845688]], [[0.59266909280647828, 0.40733090719352177], [0.56974431743975207, 0.43025568256024799]]]) assert_array_almost_equal(actual, desired, decimal=15) def test_dirichlet_size(self): # gh-3173 p = np.array([51.72840233779265162, 39.74494232180943953]) assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2)) assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2)) assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2)) assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2)) assert_raises(TypeError, np.random.dirichlet, p, float(1)) def test_dirichlet_bad_alpha(self): # gh-2089 alpha = np.array([5.4e-01, -1.0e-16]) assert_raises(ValueError, np.random.mtrand.dirichlet, alpha) def test_exponential(self): np.random.seed(self.seed) actual = np.random.exponential(1.1234, size=(3, 2)) desired = np.array([[1.08342649775011624, 1.00607889924557314], [2.46628830085216721, 2.49668106809923884], [0.68717433461363442, 1.69175666993575979]]) assert_array_almost_equal(actual, desired, decimal=15) def test_exponential_0(self): assert_equal(np.random.exponential(scale=0), 0) assert_raises(ValueError, np.random.exponential, scale=-0.) def test_f(self): np.random.seed(self.seed) actual = np.random.f(12, 77, size=(3, 2)) desired = np.array([[1.21975394418575878, 1.75135759791559775], [1.44803115017146489, 1.22108959480396262], [1.02176975757740629, 1.34431827623300415]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gamma(self): np.random.seed(self.seed) actual = np.random.gamma(5, 3, size=(3, 2)) desired = np.array([[24.60509188649287182, 28.54993563207210627], [26.13476110204064184, 12.56988482927716078], [31.71863275789960568, 33.30143302795922011]]) assert_array_almost_equal(actual, desired, decimal=14) def test_gamma_0(self): assert_equal(np.random.gamma(shape=0, scale=0), 0) assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.) def test_geometric(self): np.random.seed(self.seed) actual = np.random.geometric(.123456789, size=(3, 2)) desired = np.array([[8, 7], [17, 17], [5, 12]]) assert_array_equal(actual, desired) def test_gumbel(self): np.random.seed(self.seed) actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.19591898743416816, 0.34405539668096674], [-1.4492522252274278, -1.47374816298446865], [1.10651090478803416, -0.69535848626236174]]) assert_array_almost_equal(actual, desired, decimal=15) def test_gumbel_0(self): assert_equal(np.random.gumbel(scale=0), 0) assert_raises(ValueError, np.random.gumbel, scale=-0.) 
def test_hypergeometric(self): np.random.seed(self.seed) actual = np.random.hypergeometric(10, 5, 14, size=(3, 2)) desired = np.array([[10, 10], [10, 10], [9, 9]]) assert_array_equal(actual, desired) # Test nbad = 0 actual = np.random.hypergeometric(5, 0, 3, size=4) desired = np.array([3, 3, 3, 3]) assert_array_equal(actual, desired) actual = np.random.hypergeometric(15, 0, 12, size=4) desired = np.array([12, 12, 12, 12]) assert_array_equal(actual, desired) # Test ngood = 0 actual = np.random.hypergeometric(0, 5, 3, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) actual = np.random.hypergeometric(0, 15, 12, size=4) desired = np.array([0, 0, 0, 0]) assert_array_equal(actual, desired) def test_laplace(self): np.random.seed(self.seed) actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[0.66599721112760157, 0.52829452552221945], [3.12791959514407125, 3.18202813572992005], [-0.05391065675859356, 1.74901336242837324]]) assert_array_almost_equal(actual, desired, decimal=15) def test_laplace_0(self): assert_equal(np.random.laplace(scale=0), 0) assert_raises(ValueError, np.random.laplace, scale=-0.) def test_logistic(self): np.random.seed(self.seed) actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[1.09232835305011444, 0.8648196662399954], [4.27818590694950185, 4.33897006346929714], [-0.21682183359214885, 2.63373365386060332]]) assert_array_almost_equal(actual, desired, decimal=15) def test_lognormal(self): np.random.seed(self.seed) actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2)) desired = np.array([[16.50698631688883822, 36.54846706092654784], [22.67886599981281748, 0.71617561058995771], [65.72798501792723869, 86.84341601437161273]]) assert_array_almost_equal(actual, desired, decimal=13) def test_lognormal_0(self): assert_equal(np.random.lognormal(sigma=0), 1) assert_raises(ValueError, np.random.lognormal, sigma=-0.) 
def test_logseries(self): np.random.seed(self.seed) actual = np.random.logseries(p=.923456789, size=(3, 2)) desired = np.array([[2, 2], [6, 17], [3, 6]]) assert_array_equal(actual, desired) def test_multinomial(self): np.random.seed(self.seed) actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2)) desired = np.array([[[4, 3, 5, 4, 2, 2], [5, 2, 8, 2, 2, 1]], [[3, 4, 3, 6, 0, 4], [2, 1, 4, 3, 6, 4]], [[4, 4, 2, 5, 2, 3], [4, 3, 4, 2, 3, 4]]]) assert_array_equal(actual, desired) def test_multivariate_normal(self): np.random.seed(self.seed) mean = (.123456789, 10) cov = [[1, 0], [0, 1]] size = (3, 2) actual = np.random.multivariate_normal(mean, cov, size) desired = np.array([[[1.463620246718631, 11.73759122771936], [1.622445133300628, 9.771356667546383]], [[2.154490787682787, 12.170324946056553], [1.719909438201865, 9.230548443648306]], [[0.689515026297799, 9.880729819607714], [-0.023054015651998, 9.201096623542879]]]) assert_array_almost_equal(actual, desired, decimal=15) # Check for default size, was raising deprecation warning actual = np.random.multivariate_normal(mean, cov) desired = np.array([0.895289569463708, 9.17180864067987]) assert_array_almost_equal(actual, desired, decimal=15) # Check that non positive-semidefinite covariance warns with # RuntimeWarning mean = [0, 0] cov = [[1, 2], [2, 1]] assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov) # and that it doesn't warn with RuntimeWarning check_valid='ignore' assert_no_warnings(np.random.multivariate_normal, mean, cov, check_valid='ignore') # and that it raises with RuntimeWarning check_valid='raises' assert_raises(ValueError, np.random.multivariate_normal, mean, cov, check_valid='raise') cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32) with suppress_warnings() as sup: np.random.multivariate_normal(mean, cov) w = sup.record(RuntimeWarning) assert len(w) == 0 def test_negative_binomial(self): np.random.seed(self.seed) actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) desired = np.array([[848, 841], [892, 611], [779, 647]]) assert_array_equal(actual, desired) def test_noncentral_chisquare(self): np.random.seed(self.seed) actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2)) desired = np.array([[23.91905354498517511, 13.35324692733826346], [31.22452661329736401, 16.60047399466177254], [5.03461598262724586, 17.94973089023519464]]) assert_array_almost_equal(actual, desired, decimal=14) actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2)) desired = np.array([[1.47145377828516666, 0.15052899268012659], [0.00943803056963588, 1.02647251615666169], [0.332334982684171, 0.15451287602753125]]) assert_array_almost_equal(actual, desired, decimal=14) np.random.seed(self.seed) actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2)) desired = np.array([[9.597154162763948, 11.725484450296079], [10.413711048138335, 3.694475922923986], [13.484222138963087, 14.377255424602957]]) assert_array_almost_equal(actual, desired, decimal=14) def test_noncentral_f(self): np.random.seed(self.seed) actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2)) desired = np.array([[1.40598099674926669, 0.34207973179285761], [3.57715069265772545, 7.92632662577829805], [0.43741599463544162, 1.1774208752428319]]) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): np.random.seed(self.seed) actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2)) desired = np.array([[2.80378370443726244, 3.59863924443872163], [3.121433477601256, 
-0.33382987590723379], [4.18552478636557357, 4.46410668111310471]]) assert_array_almost_equal(actual, desired, decimal=15) def test_normal_0(self): assert_equal(np.random.normal(scale=0), 0) assert_raises(ValueError, np.random.normal, scale=-0.) def test_pareto(self): np.random.seed(self.seed) actual = np.random.pareto(a=.123456789, size=(3, 2)) desired = np.array( [[2.46852460439034849e+03, 1.41286880810518346e+03], [5.28287797029485181e+07, 6.57720981047328785e+07], [1.40840323350391515e+02, 1.98390255135251704e+05]]) # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this # matrix differs by 24 nulps. Discussion: # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html # Consensus is that this is probably some gcc quirk that affects # rounding but not in any important way, so we just use a looser # tolerance on this test: np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30) def test_poisson(self): np.random.seed(self.seed) actual = np.random.poisson(lam=.123456789, size=(3, 2)) desired = np.array([[0, 0], [1, 0], [0, 0]]) assert_array_equal(actual, desired) def test_poisson_exceptions(self): lambig = np.iinfo('l').max lamneg = -1 assert_raises(ValueError, np.random.poisson, lamneg) assert_raises(ValueError, np.random.poisson, [lamneg]*10) assert_raises(ValueError, np.random.poisson, lambig) assert_raises(ValueError, np.random.poisson, [lambig]*10) def test_power(self): np.random.seed(self.seed) actual = np.random.power(a=.123456789, size=(3, 2)) desired = np.array([[0.02048932883240791, 0.01424192241128213], [0.38446073748535298, 0.39499689943484395], [0.00177699707563439, 0.13115505880863756]]) assert_array_almost_equal(actual, desired, decimal=15) def test_rayleigh(self): np.random.seed(self.seed) actual = np.random.rayleigh(scale=10, size=(3, 2)) desired = np.array([[13.8882496494248393, 13.383318339044731], [20.95413364294492098, 21.08285015800712614], [11.06066537006854311, 17.35468505778271009]]) assert_array_almost_equal(actual, desired, decimal=14) def test_rayleigh_0(self): assert_equal(np.random.rayleigh(scale=0), 0) assert_raises(ValueError, np.random.rayleigh, scale=-0.) def test_standard_cauchy(self): np.random.seed(self.seed) actual = np.random.standard_cauchy(size=(3, 2)) desired = np.array([[0.77127660196445336, -6.55601161955910605], [0.93582023391158309, -2.07479293013759447], [-4.74601644297011926, 0.18338989290760804]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_exponential(self): np.random.seed(self.seed) actual = np.random.standard_exponential(size=(3, 2)) desired = np.array([[0.96441739162374596, 0.89556604882105506], [2.1953785836319808, 2.22243285392490542], [0.6116915921431676, 1.50592546727413201]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_gamma(self): np.random.seed(self.seed) actual = np.random.standard_gamma(shape=3, size=(3, 2)) desired = np.array([[5.50841531318455058, 6.62953470301903103], [5.93988484943779227, 2.31044849402133989], [7.54838614231317084, 8.012756093271868]]) assert_array_almost_equal(actual, desired, decimal=14) def test_standard_gamma_0(self): assert_equal(np.random.standard_gamma(shape=0), 0) assert_raises(ValueError, np.random.standard_gamma, shape=-0.) 
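    # Editor's note -- illustrative sketch, not part of the upstream test suite.
    # test_pareto above relies on assert_array_almost_equal_nulp with a loose
    # tolerance (nulp=30). "nulp" counts units in the last place: adjacent
    # representable float64 values differ by exactly one nulp, so nulp=30 allows
    # roughly 30 representable steps of disagreement. A minimal demonstration:
    def _editor_nulp_example(self):
        a = np.float64(1.0)
        b = np.nextafter(a, 2.0)  # the next representable float64 after 1.0
        # a and b differ by exactly one unit in the last place, so this passes.
        np.testing.assert_array_almost_equal_nulp(a, b, nulp=1)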
def test_standard_normal(self): np.random.seed(self.seed) actual = np.random.standard_normal(size=(3, 2)) desired = np.array([[1.34016345771863121, 1.73759122771936081], [1.498988344300628, -0.2286433324536169], [2.031033998682787, 2.17032494605655257]]) assert_array_almost_equal(actual, desired, decimal=15) def test_standard_t(self): np.random.seed(self.seed) actual = np.random.standard_t(df=10, size=(3, 2)) desired = np.array([[0.97140611862659965, -0.08830486548450577], [1.36311143689505321, -0.55317463909867071], [-0.18473749069684214, 0.61181537341755321]]) assert_array_almost_equal(actual, desired, decimal=15) def test_triangular(self): np.random.seed(self.seed) actual = np.random.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2)) desired = np.array([[12.68117178949215784, 12.4129206149193152], [16.20131377335158263, 16.25692138747600524], [11.20400690911820263, 14.4978144835829923]]) assert_array_almost_equal(actual, desired, decimal=14) def test_uniform(self): np.random.seed(self.seed) actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2)) desired = np.array([[6.99097932346268003, 6.73801597444323974], [9.50364421400426274, 9.53130618907631089], [5.48995325769805476, 8.47493103280052118]]) assert_array_almost_equal(actual, desired, decimal=15) def test_uniform_range_bounds(self): fmin = np.finfo('float').min fmax = np.finfo('float').max func = np.random.uniform assert_raises(OverflowError, func, -np.inf, 0) assert_raises(OverflowError, func, 0, np.inf) assert_raises(OverflowError, func, fmin, fmax) assert_raises(OverflowError, func, [-np.inf], [0]) assert_raises(OverflowError, func, [0], [np.inf]) # (fmax / 1e17) - fmin is within range, so this should not throw # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX > # DBL_MAX by increasing fmin a bit np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17) def test_scalar_exception_propagation(self): # Tests that exceptions are correctly propagated in distributions # when called with objects that throw exceptions when converted to # scalars. 
# # Regression test for gh: 8865 class ThrowingFloat(np.ndarray): def __float__(self): raise TypeError throwing_float = np.array(1.0).view(ThrowingFloat) assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float) class ThrowingInteger(np.ndarray): def __int__(self): raise TypeError __index__ = __int__ throwing_int = np.array(1).view(ThrowingInteger) assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1) def test_vonmises(self): np.random.seed(self.seed) actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2)) desired = np.array([[2.28567572673902042, 2.89163838442285037], [0.38198375564286025, 2.57638023113890746], [1.19153771588353052, 1.83509849681825354]]) assert_array_almost_equal(actual, desired, decimal=15) def test_vonmises_small(self): # check infinite loop, gh-4720 np.random.seed(self.seed) r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6) np.testing.assert_(np.isfinite(r).all()) def test_wald(self): np.random.seed(self.seed) actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2)) desired = np.array([[3.82935265715889983, 5.13125249184285526], [0.35045403618358717, 1.50832396872003538], [0.24124319895843183, 0.22031101461955038]]) assert_array_almost_equal(actual, desired, decimal=14) def test_weibull(self): np.random.seed(self.seed) actual = np.random.weibull(a=1.23, size=(3, 2)) desired = np.array([[0.97097342648766727, 0.91422896443565516], [1.89517770034962929, 1.91414357960479564], [0.67057783752390987, 1.39494046635066793]]) assert_array_almost_equal(actual, desired, decimal=15) def test_weibull_0(self): np.random.seed(self.seed) assert_equal(np.random.weibull(a=0, size=12), np.zeros(12)) assert_raises(ValueError, np.random.weibull, a=-0.) def test_zipf(self): np.random.seed(self.seed) actual = np.random.zipf(a=1.23, size=(3, 2)) desired = np.array([[66, 29], [1, 1], [3, 13]]) assert_array_equal(actual, desired) class TestBroadcast(object): # tests that functions that broadcast behave # correctly when presented with non-scalar arguments def setup(self): self.seed = 123456789 def setSeed(self): np.random.seed(self.seed) # TODO: Include test for randint once it can broadcast # Can steal the test written in PR #6938 def test_uniform(self): low = [0] high = [1] uniform = np.random.uniform desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) self.setSeed() actual = uniform(low * 3, high) assert_array_almost_equal(actual, desired, decimal=14) self.setSeed() actual = uniform(low, high * 3) assert_array_almost_equal(actual, desired, decimal=14) def test_normal(self): loc = [0] scale = [1] bad_scale = [-1] normal = np.random.normal desired = np.array([2.2129019979039612, 2.1283977976520019, 1.8417114045748335]) self.setSeed() actual = normal(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, normal, loc * 3, bad_scale) self.setSeed() actual = normal(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, normal, loc, bad_scale * 3) def test_beta(self): a = [1] b = [2] bad_a = [-1] bad_b = [-2] beta = np.random.beta desired = np.array([0.19843558305989056, 0.075230336409423643, 0.24976865978980844]) self.setSeed() actual = beta(a * 3, b) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, beta, bad_a * 3, b) assert_raises(ValueError, beta, a * 3, bad_b) self.setSeed() actual = beta(a, b * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, beta, bad_a, b * 3) 
assert_raises(ValueError, beta, a, bad_b * 3) def test_exponential(self): scale = [1] bad_scale = [-1] exponential = np.random.exponential desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) self.setSeed() actual = exponential(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, exponential, bad_scale * 3) def test_standard_gamma(self): shape = [1] bad_shape = [-1] std_gamma = np.random.standard_gamma desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) self.setSeed() actual = std_gamma(shape * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, std_gamma, bad_shape * 3) def test_gamma(self): shape = [1] scale = [2] bad_shape = [-1] bad_scale = [-2] gamma = np.random.gamma desired = np.array([1.5221370731769048, 1.5277256455738331, 1.4248762625178359]) self.setSeed() actual = gamma(shape * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gamma, bad_shape * 3, scale) assert_raises(ValueError, gamma, shape * 3, bad_scale) self.setSeed() actual = gamma(shape, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gamma, bad_shape, scale * 3) assert_raises(ValueError, gamma, shape, bad_scale * 3) def test_f(self): dfnum = [1] dfden = [2] bad_dfnum = [-1] bad_dfden = [-2] f = np.random.f desired = np.array([0.80038951638264799, 0.86768719635363512, 2.7251095168386801]) self.setSeed() actual = f(dfnum * 3, dfden) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, f, bad_dfnum * 3, dfden) assert_raises(ValueError, f, dfnum * 3, bad_dfden) self.setSeed() actual = f(dfnum, dfden * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, f, bad_dfnum, dfden * 3) assert_raises(ValueError, f, dfnum, bad_dfden * 3) def test_noncentral_f(self): dfnum = [2] dfden = [3] nonc = [4] bad_dfnum = [0] bad_dfden = [-1] bad_nonc = [-2] nonc_f = np.random.noncentral_f desired = np.array([9.1393943263705211, 13.025456344595602, 8.8018098359100545]) self.setSeed() actual = nonc_f(dfnum * 3, dfden, nonc) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc) assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc) assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc) self.setSeed() actual = nonc_f(dfnum, dfden * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc) assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc) assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc) self.setSeed() actual = nonc_f(dfnum, dfden, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3) assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3) assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3) def test_noncentral_f_small_df(self): self.setSeed() desired = np.array([6.869638627492048, 0.785880199263955]) actual = np.random.noncentral_f(0.9, 0.9, 2, size=2) assert_array_almost_equal(actual, desired, decimal=14) def test_chisquare(self): df = [1] bad_df = [-1] chisquare = np.random.chisquare desired = np.array([0.57022801133088286, 0.51947702108840776, 0.1320969254923558]) self.setSeed() actual = chisquare(df * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, chisquare, bad_df * 3) def 
test_noncentral_chisquare(self): df = [1] nonc = [2] bad_df = [-1] bad_nonc = [-2] nonc_chi = np.random.noncentral_chisquare desired = np.array([9.0015599467913763, 4.5804135049718742, 6.0872302432834564]) self.setSeed() actual = nonc_chi(df * 3, nonc) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_chi, bad_df * 3, nonc) assert_raises(ValueError, nonc_chi, df * 3, bad_nonc) self.setSeed() actual = nonc_chi(df, nonc * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, nonc_chi, bad_df, nonc * 3) assert_raises(ValueError, nonc_chi, df, bad_nonc * 3) def test_standard_t(self): df = [1] bad_df = [-1] t = np.random.standard_t desired = np.array([3.0702872575217643, 5.8560725167361607, 1.0274791436474273]) self.setSeed() actual = t(df * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, t, bad_df * 3) def test_vonmises(self): mu = [2] kappa = [1] bad_kappa = [-1] vonmises = np.random.vonmises desired = np.array([2.9883443664201312, -2.7064099483995943, -1.8672476700665914]) self.setSeed() actual = vonmises(mu * 3, kappa) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, vonmises, mu * 3, bad_kappa) self.setSeed() actual = vonmises(mu, kappa * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, vonmises, mu, bad_kappa * 3) def test_pareto(self): a = [1] bad_a = [-1] pareto = np.random.pareto desired = np.array([1.1405622680198362, 1.1465519762044529, 1.0389564467453547]) self.setSeed() actual = pareto(a * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, pareto, bad_a * 3) def test_weibull(self): a = [1] bad_a = [-1] weibull = np.random.weibull desired = np.array([0.76106853658845242, 0.76386282278691653, 0.71243813125891797]) self.setSeed() actual = weibull(a * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, weibull, bad_a * 3) def test_power(self): a = [1] bad_a = [-1] power = np.random.power desired = np.array([0.53283302478975902, 0.53413660089041659, 0.50955303552646702]) self.setSeed() actual = power(a * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, power, bad_a * 3) def test_laplace(self): loc = [0] scale = [1] bad_scale = [-1] laplace = np.random.laplace desired = np.array([0.067921356028507157, 0.070715642226971326, 0.019290950698972624]) self.setSeed() actual = laplace(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, laplace, loc * 3, bad_scale) self.setSeed() actual = laplace(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, laplace, loc, bad_scale * 3) def test_gumbel(self): loc = [0] scale = [1] bad_scale = [-1] gumbel = np.random.gumbel desired = np.array([0.2730318639556768, 0.26936705726291116, 0.33906220393037939]) self.setSeed() actual = gumbel(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gumbel, loc * 3, bad_scale) self.setSeed() actual = gumbel(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, gumbel, loc, bad_scale * 3) def test_logistic(self): loc = [0] scale = [1] bad_scale = [-1] logistic = np.random.logistic desired = np.array([0.13152135837586171, 0.13675915696285773, 0.038216792802833396]) self.setSeed() actual = logistic(loc * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) 
assert_raises(ValueError, logistic, loc * 3, bad_scale) self.setSeed() actual = logistic(loc, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, logistic, loc, bad_scale * 3) def test_lognormal(self): mean = [0] sigma = [1] bad_sigma = [-1] lognormal = np.random.lognormal desired = np.array([9.1422086044848427, 8.4013952870126261, 6.3073234116578671]) self.setSeed() actual = lognormal(mean * 3, sigma) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, lognormal, mean * 3, bad_sigma) self.setSeed() actual = lognormal(mean, sigma * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, lognormal, mean, bad_sigma * 3) def test_rayleigh(self): scale = [1] bad_scale = [-1] rayleigh = np.random.rayleigh desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789]) self.setSeed() actual = rayleigh(scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, rayleigh, bad_scale * 3) def test_wald(self): mean = [0.5] scale = [1] bad_mean = [0] bad_scale = [-2] wald = np.random.wald desired = np.array([0.11873681120271318, 0.12450084820795027, 0.9096122728408238]) self.setSeed() actual = wald(mean * 3, scale) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, wald, bad_mean * 3, scale) assert_raises(ValueError, wald, mean * 3, bad_scale) self.setSeed() actual = wald(mean, scale * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, wald, bad_mean, scale * 3) assert_raises(ValueError, wald, mean, bad_scale * 3) assert_raises(ValueError, wald, 0.0, 1) assert_raises(ValueError, wald, 0.5, 0.0) def test_triangular(self): left = [1] right = [3] mode = [2] bad_left_one = [3] bad_mode_one = [4] bad_left_two, bad_mode_two = right * 2 triangular = np.random.triangular desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208]) self.setSeed() actual = triangular(left * 3, mode, right) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, triangular, bad_left_one * 3, mode, right) assert_raises(ValueError, triangular, left * 3, bad_mode_one, right) assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right) self.setSeed() actual = triangular(left, mode * 3, right) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, triangular, bad_left_one, mode * 3, right) assert_raises(ValueError, triangular, left, bad_mode_one * 3, right) assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right) self.setSeed() actual = triangular(left, mode, right * 3) assert_array_almost_equal(actual, desired, decimal=14) assert_raises(ValueError, triangular, bad_left_one, mode, right * 3) assert_raises(ValueError, triangular, left, bad_mode_one, right * 3) assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3) def test_binomial(self): n = [1] p = [0.5] bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] binom = np.random.binomial desired = np.array([1, 1, 1]) self.setSeed() actual = binom(n * 3, p) assert_array_equal(actual, desired) assert_raises(ValueError, binom, bad_n * 3, p) assert_raises(ValueError, binom, n * 3, bad_p_one) assert_raises(ValueError, binom, n * 3, bad_p_two) self.setSeed() actual = binom(n, p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, binom, bad_n, p * 3) assert_raises(ValueError, binom, n, bad_p_one * 3) assert_raises(ValueError, binom, n, bad_p_two * 3) def 
test_negative_binomial(self): n = [1] p = [0.5] bad_n = [-1] bad_p_one = [-1] bad_p_two = [1.5] neg_binom = np.random.negative_binomial desired = np.array([1, 0, 1]) self.setSeed() actual = neg_binom(n * 3, p) assert_array_equal(actual, desired) assert_raises(ValueError, neg_binom, bad_n * 3, p) assert_raises(ValueError, neg_binom, n * 3, bad_p_one) assert_raises(ValueError, neg_binom, n * 3, bad_p_two) self.setSeed() actual = neg_binom(n, p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, neg_binom, bad_n, p * 3) assert_raises(ValueError, neg_binom, n, bad_p_one * 3) assert_raises(ValueError, neg_binom, n, bad_p_two * 3) def test_poisson(self): max_lam = np.random.RandomState()._poisson_lam_max lam = [1] bad_lam_one = [-1] bad_lam_two = [max_lam * 2] poisson = np.random.poisson desired = np.array([1, 1, 0]) self.setSeed() actual = poisson(lam * 3) assert_array_equal(actual, desired) assert_raises(ValueError, poisson, bad_lam_one * 3) assert_raises(ValueError, poisson, bad_lam_two * 3) def test_zipf(self): a = [2] bad_a = [0] zipf = np.random.zipf desired = np.array([2, 2, 1]) self.setSeed() actual = zipf(a * 3) assert_array_equal(actual, desired) assert_raises(ValueError, zipf, bad_a * 3) with np.errstate(invalid='ignore'): assert_raises(ValueError, zipf, np.nan) assert_raises(ValueError, zipf, [0, 0, np.nan]) def test_geometric(self): p = [0.5] bad_p_one = [-1] bad_p_two = [1.5] geom = np.random.geometric desired = np.array([2, 2, 2]) self.setSeed() actual = geom(p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, geom, bad_p_one * 3) assert_raises(ValueError, geom, bad_p_two * 3) def test_hypergeometric(self): ngood = [1] nbad = [2] nsample = [2] bad_ngood = [-1] bad_nbad = [-2] bad_nsample_one = [0] bad_nsample_two = [4] hypergeom = np.random.hypergeometric desired = np.array([1, 1, 1]) self.setSeed() actual = hypergeom(ngood * 3, nbad, nsample) assert_array_equal(actual, desired) assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample) assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample) assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one) assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two) self.setSeed() actual = hypergeom(ngood, nbad * 3, nsample) assert_array_equal(actual, desired) assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample) assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample) assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one) assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two) self.setSeed() actual = hypergeom(ngood, nbad, nsample * 3) assert_array_equal(actual, desired) assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3) assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3) assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3) assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3) def test_logseries(self): p = [0.5] bad_p_one = [2] bad_p_two = [-1] logseries = np.random.logseries desired = np.array([1, 1, 1]) self.setSeed() actual = logseries(p * 3) assert_array_equal(actual, desired) assert_raises(ValueError, logseries, bad_p_one * 3) assert_raises(ValueError, logseries, bad_p_two * 3) class TestThread(object): # make sure each state produces the same sequence even in threads def setup(self): self.seeds = range(4) def check_function(self, function, sz): from threading import Thread out1 = np.empty((len(self.seeds),) + sz) out2 = 
np.empty((len(self.seeds),) + sz) # threaded generation t = [Thread(target=function, args=(np.random.RandomState(s), o)) for s, o in zip(self.seeds, out1)] [x.start() for x in t] [x.join() for x in t] # the same serial for s, o in zip(self.seeds, out2): function(np.random.RandomState(s), o) # these platforms change x87 fpu precision mode in threads if np.intp().dtype.itemsize == 4 and sys.platform == "win32": assert_array_almost_equal(out1, out2) else: assert_array_equal(out1, out2) def test_normal(self): def gen_random(state, out): out[...] = state.normal(size=10000) self.check_function(gen_random, sz=(10000,)) def test_exp(self): def gen_random(state, out): out[...] = state.exponential(scale=np.ones((100, 1000))) self.check_function(gen_random, sz=(100, 1000)) def test_multinomial(self): def gen_random(state, out): out[...] = state.multinomial(10, [1/6.]*6, size=10000) self.check_function(gen_random, sz=(10000, 6)) # See Issue #4263 class TestSingleEltArrayInput(object): def setup(self): self.argOne = np.array([2]) self.argTwo = np.array([3]) self.argThree = np.array([4]) self.tgtShape = (1,) def test_one_arg_funcs(self): funcs = (np.random.exponential, np.random.standard_gamma, np.random.chisquare, np.random.standard_t, np.random.pareto, np.random.weibull, np.random.power, np.random.rayleigh, np.random.poisson, np.random.zipf, np.random.geometric, np.random.logseries) probfuncs = (np.random.geometric, np.random.logseries) for func in funcs: if func in probfuncs: # p < 1.0 out = func(np.array([0.5])) else: out = func(self.argOne) assert_equal(out.shape, self.tgtShape) def test_two_arg_funcs(self): funcs = (np.random.uniform, np.random.normal, np.random.beta, np.random.gamma, np.random.f, np.random.noncentral_chisquare, np.random.vonmises, np.random.laplace, np.random.gumbel, np.random.logistic, np.random.lognormal, np.random.wald, np.random.binomial, np.random.negative_binomial) probfuncs = (np.random.binomial, np.random.negative_binomial) for func in funcs: if func in probfuncs: # p <= 1 argTwo = np.array([0.5]) else: argTwo = self.argTwo out = func(self.argOne, argTwo) assert_equal(out.shape, self.tgtShape) out = func(self.argOne[0], argTwo) assert_equal(out.shape, self.tgtShape) out = func(self.argOne, argTwo[0]) assert_equal(out.shape, self.tgtShape) # TODO: Uncomment once randint can broadcast arguments # def test_randint(self): # itype = [bool, np.int8, np.uint8, np.int16, np.uint16, # np.int32, np.uint32, np.int64, np.uint64] # func = np.random.randint # high = np.array([1]) # low = np.array([0]) # # for dt in itype: # out = func(low, high, dtype=dt) # self.assert_equal(out.shape, self.tgtShape) # # out = func(low[0], high, dtype=dt) # self.assert_equal(out.shape, self.tgtShape) # # out = func(low, high[0], dtype=dt) # self.assert_equal(out.shape, self.tgtShape) def test_three_arg_funcs(self): funcs = [np.random.noncentral_f, np.random.triangular, np.random.hypergeometric] for func in funcs: out = func(self.argOne, self.argTwo, self.argThree) assert_equal(out.shape, self.tgtShape) out = func(self.argOne[0], self.argTwo, self.argThree) assert_equal(out.shape, self.tgtShape) out = func(self.argOne, self.argTwo[0], self.argThree) assert_equal(out.shape, self.tgtShape)
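# ----------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the upstream test suite.
# It demonstrates the precision pitfall described in
# test_int64_uint64_corner_case above: comparing the int64/uint64 bounds
# through float64 loses the distinction between them, because 2**63 - 1 is not
# exactly representable as a float64 and rounds up to 2**63.
if __name__ == "__main__":
    lbnd = np.int64(np.iinfo(np.int64).max)       # 2**63 - 1
    ubnd = np.uint64(np.iinfo(np.int64).max + 1)  # 2**63
    # Both bounds collapse onto the same float64 value, so a comparison done
    # in float64 would wrongly conclude that lbnd == ubnd.
    print(np.float64(lbnd) == np.float64(ubnd))   # True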
jorisvandenbossche/numpy
numpy/random/tests/test_random.py
Python
bsd-3-clause
66,842
[ "Gaussian" ]
ea1fe40ce4f9e4fd55ab6b5ff0950ce30253aa411d7ed83d5e5994818ae80007
from pyhamimports import * from gui_utils import * from spectrum import * class Eyecheck(QMainWindow): def __init__(self, specObj, options): super().__init__() # Store Input Variables self.specObj = specObj # The Spectrum object created in the pyhammer script self.options = options # The list of options input by the user in the pyhammer script # Create and show the GUI self.defineUsefulVars() # Setup some basic variables to be used later self.readOutfile() # Read the output file created self.createGui() # Define and layout the GUI elements self.selectInitialSpectrum()# Determine which spectrum to display first if self.specIndex == -1: # If no initial spectrum is chosen qApp.quit() # Quit the QApplication return # Return back to the main pyhammer routine self.loadUserSpectrum() # Otherwise, load the appropriate spectrum to be displayed self.updatePlot() # Update the plot showing the template and spectrum self.show() # Show the final GUI window to the user ### # Initialization Methods # def defineUsefulVars(self): """ Description: This method simply defines some useful variables as part of the class that can be used in various places. """ # Define some basic spectra related information self.specType = ['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'dC', 'DA'] self.subType = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] self.subTypeC = ['G', 'K', 'M', '', '', '', '', '', '', ''] self.subTypeWD = ['0.5', '1.0', '1.5', '2.0', '2.5', '3.5', '5.0', '5.5', '6.5', '7.0'] self.metalType = ['-2.0', '-1.5', '-1.0', '-0.5', '+0.0', '+0.5', '+1.0'] self.templateDir = os.path.join(os.path.split(__file__)[0], 'resources', 'templates') self.SB2templateDir = os.path.join(os.path.split(__file__)[0], 'resources', 'templates_SB2') # Define an index to point to the current spectra in the list # of spectra in the output file. We will start by assuming we're # looking at the first spectra in the list. The selectInitialSpectrum # method will update this as necessary. self.specIndex = 0 # Define plot related variables plt.style.use('ggplot') self.full_xlim = None # +-- self.full_ylim = None # | Store these to keep track self.zoomed_xlim = None # | of zoom states on the plot self.zoomed_ylim = None # | self.zoomed = False # +-- # Define the help strings to display when the user chooses a help # option from the menu bar self.helpStr = ( 'Welcome to the main GUI for spectral typing your spectra.\n\n' 'Each spectra in your spectra list file will be loaded in ' 'sequence and shown on top of the template it was matched ' 'to. From here, fine tune the spectral type by comparing ' 'your spectrum to the templates and choose "Next" when ' "you've landed on the correct choice. Continue through each " 'spectrum until finished.') self.buttonStr = ( 'Upon opening the Eyecheck program, the first spectrum in your ' 'list will be loaded and displayed on top of the template determined ' 'by the auto-classification algorithm.\n\n' 'Use the "Earlier" and "Later" buttons to change the spectrum ' 'templates. Note that not all templates exist for all spectral ' 'types. This program specifically disallows choosing K8 and K9 ' 'spectral types as well.\n\n' 'The "Higher" and "Lower" buttons change the metallicity. 
Again, ' 'not all metallicities exist as templates.\n\n' 'The "Odd" button allows you to mark a spectrum as something other ' 'than a standard classification, such as a white dwarf or galaxy.\n\n' 'The "Bad" button simply marks the spectrum as BAD in the output ' 'file, indicating it is not able to be classified.\n\n' 'You can cycle between your spectra using the "Back" and "Next" buttons. ' 'Note that hitting "Next" will save the currently selected state as ' 'the classification for that spectrum.') self.keyStr = ( 'The following keys are mapped to specific actions.\n\n' '<Left>\t\tEarlier spectral type button\n' '<Right>\tLater spectral type button\n' '<Up>\t\tHigher metallicity button\n' '<Down>\tLower metallicity button\n' '<Enter>\tAccept spectral classification\n' '<Ctrl-K>\tMove to previous spectrum\n' '<Ctrl-O>\tClassify spectrum as odd\n' '<Ctrl-B>\tClassify spectrum as bad\n' '<Ctrl-E>\tToggle the template error\n' '<Ctrl-S>\tSmooth/Unsmooth the spectrum\n' '<Ctrl-L>\tLock the smooth state between spectra\n' '<Ctrl-R>\tToggle removing the stitching spike in SDSS spectra\n' '<Ctrl-Q>\tQuit PyHammer\n' '<Ctrl-P>') self.tipStr = ( 'The following are a set of tips for useful features of the ' 'program.\n\n' 'The drop down list in the upper left of the Eyecheck window ' 'displays all the spectra in your list. Select a different spectrum ' 'from this drop down to automatically jump to a different spectrum. ' 'This will save the choice for the current spectrum.\n\n' 'Any zoom applied to the plot is held constant between switching ' 'templates. This makes it easy to compare templates around specific ' 'features or spectral lines. Hit the home button on the plot ' 'to return to the original zoom level.\n\n' 'The smooth menu option will allow you to smooth or unsmooth ' 'your spectra in the event that it is noisy. This simply applies ' 'a boxcar convolution across your spectrum, leaving the edges unsmoothed.\n\n' 'By default, every new, loaded spectrum will be unsmoothed and ' 'the smooth button state reset. You can choose to keep the smooth ' 'button state between loading spectra by selecting the menu option ' '"Lock Smooth State".\n\n' 'In SDSS spectra, there is a spike that occurs between 5569 and 5588 ' 'Angstroms caused by stitching together the results from both detectors. ' 'You can choose to artificially remove this spike for easier viewing by ' 'selecting the "Remove SDSS Stitch Spike" from the Options menu.\n\n' 'At the bottom of the sidebar next to the figure is the template match ' 'metric. This is a measure of how close a match the current template is ' 'to the spectrum. A lower value indicates a closer match. Conceptually, ' 'this is simply the Euclidean distance between the template and the spectrum. ' "Use this to help classify, but don't trust it to be foolproof.\n\n" 'Some keys may need to be hit rapidly.') self.aboutStr = ( 'This project was developed by a select group of graduate students ' 'at the Department of Astronomy at Boston University. The project ' 'was led by Aurora Kesseli with development help and advice provided ' 'by Andrew West, Mark Veyette, Brandon Harrison, and Dan Feldman. ' 'Contributions were further provided by Dylan Morgan and Chris Theissan.\n\n' 'Additional spectral types for dwarf carbon (dC) stars and DA white dwarfs were ' 'added in version 2.0.0 of PyHammer by Benjamin Roulston. This version also ' 'added the ability for classifying double-lined spectroscopic (SB2) binaries.\n\n' 'See the accompanying papers: Kesseli et al. 
(2017), Roulston, Green, and Kesseli (2020) or the PyHammer GitHub\n' 'site for further details.') # Other variables self.pPressNum = 0 self.pPressTime = 0 def readOutfile(self): """ Description: Opens up the output file contained in the passed in options dict, reads it, and stores the data in a variable for later use. This stored output data will be updated as the user interacts with the program and written to the output file when they close the program. """ with open(self.options['outfile'], 'r') as file: reader = csv.reader(file) self.outData = np.asarray(list(reader)[1:]) # Ignore the header line def selectInitialSpectrum(self): """ Description: Before displaying the main GUI, this method will go through the outfile data and figure out which spectrum from the list to display first. If the user has never classified any data before for this particular file, it will just display the first file in the list. If they've already got results, it will choose the first spectrum without results and ask the user if they want to start where they left off. If all the spectra already have user classification results, it will ask if they want to start over. """ # Loop through the outData and see if some classification has occurred already for data in self.outData: # If classification by the user has already occurred for the # current spectrum, then move our index to the next one. if data[5] != 'nan' or data[6] != 'nan': self.specIndex += 1 else: # Break out once we get to a spectrum that hasn't been # classified by the user yet break # If the outfile already has eyecheck results, indicated by the fact # that the specIndex isn't pointing to the first spectrum in the list, # ask if they want to start where they left off. if self.specIndex != 0: # If every spectrum in the outfile already has an eyecheck result, # they've classified everything, so ask them if they want # to start over instead. if self.specIndex == len(self.outData): msg = MessageBox(self, ('Every spectrum in the output file already has\n' 'an eyecheck result. Do you want to start over?'), buttons = QMessageBox.Yes|QMessageBox.No) if msg.reply == QMessageBox.Yes: self.specIndex = 0 else: self.specIndex = -1 # Indicates we don't want to classify anything return else: msg = MessageBox(self, ('The output file already has eyecheck results. The\n' 'eyecheck will start with the next unclassified\n' 'spectrum. Do you want to start over instead?'), buttons = QMessageBox.Yes|QMessageBox.No) if msg.reply == QMessageBox.Yes: self.specIndex = 0 def createGui(self): """ Description: Method to create all the GUI components. This method uses a few helper methods, particularly for creating the slider sections and button sections. 
""" # Define the basic, top-level GUI components self.widget = QWidget() # The central widget in the main window self.grid = QGridLayout() # The layout manager of the central widget self.icon = QIcon(os.path.join(os.path.split(__file__)[0],'resources','sun.ico')) # The menu bar self.createMenuBar() # *** Setup the main GUI components *** # The spectrum choosing components label = QLabel('Spectrum', alignment = Qt.AlignCenter) label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed) self.grid.addWidget(label, 0, 0, 1, 3) self.spectrumList = QComboBox(self.widget) self.spectrumList.addItems([os.path.split(d[0])[-1] for d in self.outData]) self.spectrumList.currentIndexChanged.connect(self.spectrumChosen) self.grid.addWidget(self.spectrumList, 1, 0, 1, 3) # The collection of sliders and their accompanying labels self.specTypeLabel, self.specTypeSlider = self.createSlider('Primary\nStellar\nType', self.specType, self.specTypeChanged) self.subTypeLabel, self.subTypeSlider = self.createSlider('Primary\nStellar\nSubtype', self.subType, self.specSubtypeChanged) self.metalLabel, self.metalSlider = self.createSlider('Primary\nMetallicity\n[Fe/H]', self.metalType, self.metallicityChanged) # The collection of SB2 sliders and their accompanying labels self.specTypeLabelSB2, self.specTypeSliderSB2 = self.createSB2Slider('Secondary\nStellar\nType', self.specType, self.specTypeChanged, 0) self.subTypeLabelSB2, self.subTypeSliderSB2 = self.createSB2Slider('Secondary\nStellar\nSubtype', self.subType, self.specSubtypeChanged, 1) self.SB2button = QCheckBox("SB2") self.SB2button.setChecked(False) for ii in range(len(self.specTypeLabelSB2)): self.specTypeLabelSB2[ii].setDisabled(True) self.subTypeLabelSB2[ii].setDisabled(True) self.SB2button.stateChanged.connect(lambda:self.btnstate(self.SB2button)) self.grid.addWidget(self.SB2button, 3, 2) # The collection of buttons self.createButtons('Change Spectral Type', ['Earlier', 'Later'], ['Move to an eariler spectrum template', 'Move to a later spectrum template'], [self.earlierCallback, self.laterCallback]) self.createButtons('Change Metallicity [Fe/H]', ['Lower', 'Higher'], ['Move to a lower metallicity template', 'Move to a higher metallicity template'], [self.lowerMetalCallback, self.higherMetalCallback]) self.createButtons('Spectrum Choices', ['Odd', 'Bad', 'Previous', 'Next'], ['Classify your spectrum as a non-standard spectral type', 'Classify your spectrum as bad', 'Move to the previous spectrum', 'Classify your spectrum as the current selection and move to the next spectrum'], [self.oddCallback, self.badCallback, self.previousCallback, self.nextCallback]) # The distance metric frame self.distMetric = self.createDistanceMetric() # Create the matplotlib plot self.figure = plt.figure(figsize = (12,6)) self.canvas = FigureCanvas(self.figure) self.toolbar = NavigationToolbar(self.canvas, self) vgrid = QVBoxLayout(spacing = 0) vgrid.addWidget(self.toolbar) vgrid.addWidget(self.canvas, 1) self.grid.addLayout(vgrid, 0, 3, 8, 1) # Map the keyboard shortcuts QShortcut(QKeySequence(Qt.CTRL + Qt.Key_O), self).activated.connect(self.oddCallback) QShortcut(QKeySequence(Qt.CTRL + Qt.Key_B), self).activated.connect(self.badCallback) QShortcut(QKeySequence(Qt.Key_Return), self).activated.connect(self.nextCallback) QShortcut(QKeySequence(Qt.CTRL + Qt.Key_K), self).activated.connect(self.previousCallback) QShortcut(QKeySequence(Qt.Key_Left), self).activated.connect(self.earlierCallback) QShortcut(QKeySequence(Qt.Key_Right), self).activated.connect(self.laterCallback) 
QShortcut(QKeySequence(Qt.Key_Down), self).activated.connect(self.lowerMetalCallback) QShortcut(QKeySequence(Qt.Key_Up), self).activated.connect(self.higherMetalCallback) QShortcut(QKeySequence(Qt.CTRL + Qt.Key_P), self).activated.connect(self.callback_hammer_time) # *** Setup the Grid *** self.grid.setRowStretch(7, 1) self.grid.setColumnStretch(3, 1) self.grid.setContentsMargins(2,2,2,2) self.grid.setSpacing(5) # *** Set the main window properties *** self.widget.setLayout(self.grid) self.setCentralWidget(self.widget) self.setWindowTitle('PyHammer Eyecheck') self.setWindowIcon(self.icon) def btnstate(self,b): bad_primary = [0, 1, 7, 9] bad_secondary = [0, 1, 2, 7] if b.text() == "SB2": if b.isChecked() == True: for ii in range(len(self.metalLabel)): self.metalLabel[ii].setDisabled(True) for ii in range(len(self.specTypeLabelSB2)): self.specTypeLabelSB2[ii].setDisabled(False) self.subTypeLabelSB2[ii].setDisabled(False) for ii in bad_primary: self.specTypeLabel[ii].setDisabled(True) for ii in bad_secondary: self.specTypeLabelSB2[ii].setDisabled(True) if self.specTypeSlider.sliderPosition() in [0,1]: self.updateSlider(self.specTypeSlider, 2) elif self.specTypeSlider.sliderPosition() in [7, 9]: self.updateSlider(self.specTypeSlider, 8) if self.specTypeSliderSB2.sliderPosition() in [0,1,2]: self.updateSlider(self.specTypeSliderSB2, 3) elif self.specTypeSliderSB2.sliderPosition() in [7]: self.updateSlider(self.specTypeSliderSB2, 6) self.checkSB2combos() else: for ii in range(len(self.metalLabel)): self.metalLabel[ii].setDisabled(False) for ii in range(len(self.specTypeLabelSB2)): self.specTypeLabelSB2[ii].setDisabled(True) self.subTypeLabelSB2[ii].setDisabled(True) for ii in range(len(self.specTypeLabel)): self.specTypeLabel[ii].setDisabled(False) self.subTypeLabel[ii].setDisabled(False) self.updateSliderLabel() self.updatePlot() def createMenuBar(self): """ Description: A helper function of the create GUI method which is used to define the menubar for the GUI. """ # Use the PyQt menu construct. This is particularly important # for Macs because it will keep the menubar with the GUI window # rather than placing it at the top of the screen, as is usual # for Macs. We don't want this to happen because Macs take control # of the menus if you have it up there and can cause unexpected results. 
self.menuBar().setNativeMenuBar(False) # *** Define Options Menu *** optionsMenu = self.menuBar().addMenu('Options') optionsMenu.setTearOffEnabled(True) # Show Template Error Menu Item self.showTemplateError = QAction('Show Template Error', optionsMenu, checkable = True, shortcut = 'Ctrl+E') self.showTemplateError.setChecked(True) self.showTemplateError.toggled.connect(self.updatePlot) optionsMenu.addAction(self.showTemplateError) # Smooth Spectrum Menu Item self.smoothSpectrum = QAction('Smooth Spectrum', optionsMenu, checkable = True, shortcut = 'Ctrl+S') self.smoothSpectrum.toggled.connect(self.updatePlot) optionsMenu.addAction(self.smoothSpectrum) # Lock Smooth State Menu Item self.lockSmoothState = QAction('Lock Smooth State', optionsMenu, checkable = True, shortcut = 'Ctrl+L') optionsMenu.addAction(self.lockSmoothState) # Remove SDSS Stitch Spike Menu Item self.removeStitchSpike = QAction('Remove SDSS Stitch Spike', optionsMenu, checkable = True, shortcut = 'Ctrl+R') self.removeStitchSpike.toggled.connect(self.updatePlot) optionsMenu.addAction(self.removeStitchSpike) optionsMenu.addSeparator() # Quit Menu Item quitMenuItem = QAction('Quit', optionsMenu, shortcut = 'Ctrl+Q') quitMenuItem.triggered.connect(self.close) optionsMenu.addAction(quitMenuItem) # *** Define Help Menu *** helpMenu = self.menuBar().addMenu('Help') # Help Menu Item showHelpWindow = QAction('Eyecheck Help', helpMenu) showHelpWindow.triggered.connect(lambda: MessageBox(self, self.helpStr, title = 'Help')) helpMenu.addAction(showHelpWindow) # Buttons Menu Item showButtonsWindow = QAction('Buttons', helpMenu) showButtonsWindow.triggered.connect(lambda: MessageBox(self, self.buttonStr, title = 'Buttons')) helpMenu.addAction(showButtonsWindow) # Keyboard Shortcuts Menu Item showKeyboardShortcutWindow = QAction('Keyboard Shortcuts', helpMenu) showKeyboardShortcutWindow.triggered.connect(lambda: MessageBox(self, self.keyStr, title = 'Keyboard Shortcuts')) helpMenu.addAction(showKeyboardShortcutWindow) # Tips Menu Item showTipsWindow = QAction('Tips', helpMenu) showTipsWindow.triggered.connect(lambda: MessageBox(self, self.tipStr, title = 'Tips')) helpMenu.addAction(showTipsWindow) # Separator helpMenu.addSeparator() # About Menu Item showAboutWindow = QAction('About', helpMenu) showAboutWindow.triggered.connect(lambda: MessageBox(self, self.aboutStr, title = 'About')) helpMenu.addAction(showAboutWindow) def createSlider(self, title, labels, callback): """ Description: A helper method of the create GUI method. This method creates a frame and puts a label at the top, a vertical slider, and a collection of labels next to the slider. Note that both the slider and labels use customized objects from the gui_utils class which were written on top of the respective QWidgets and provide additional functionality. See those respective classes for details. Input: title: The label to put at the top of the frame as the title for the section. labels: The labels to use for the slider. The slider itself will be given a set of discrete options to match the number of labels provided. callback: The callback to use when the slider is set to a new value. Return: Returns the slider and collection of label objects. These are returned so the GUI can interact with the labels and slider later on. 
""" # Define or update the column of the top-level grid to # put this slider component into if not hasattr(self, 'column'): self.column = 0 else: self.column += 1 # Create the frame and put it in the top layer grid frame = QFrame(frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0) sliderGrid = QGridLayout() frame.setLayout(sliderGrid) self.grid.addWidget(frame, 2, self.column) # Create the label at the top of the frame label = QLabel(title, alignment = Qt.AlignCenter) sliderGrid.addWidget(label, 0, 0, 1, 2) # Add the slider slider = Slider(Qt.Vertical, minimum = 0, maximum = len(labels)-1, tickInterval = 1, pageStep = 1, invertedAppearance = True) slider.valueChanged.connect(callback) slider.setTickPosition(QSlider.TicksLeft) slider.setTracking(False) sliderGrid.addWidget(slider, 1, 1, len(labels), 1) # Add the text labels to the left of the slider tickLabels = [] for i,text in enumerate(labels): label = SliderLabel(text, slider, i) tickLabels.append(label) sliderGrid.addWidget(label, i+1, 0) # Return the labels and slider so we can access them later return tickLabels, slider def createSB2Slider(self, title, labels, callback, column): """ Description: A helper method of the create GUI method. This method creates a frame and puts a label at the top, a vertical slider, and a collection of labels next to the slider. Note that both the slider and labels use customized objects from the gui_utils class which were written on top of the respective QWidgets and provide additional functionality. See those respective classes for details. Input: title: The label to put at the top of the frame as the title for the section. labels: The labels to use for the slider. The slider itself will be given a set of discrete options to match the number of labels provided. callback: The callback to use when the slider is set to a new value. Return: Returns the slider and collection of label objects. These are returned so the GUI can interact with the labels and slider later on. """ # Define or update the column of the top-level grid to # put this slider component into # if not hasattr(self, 'column'): # self.column = 0 # else: # self.column += 1 # Create the frame and put it in the top layer grid frame = QFrame(frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0) sliderGrid = QGridLayout() frame.setLayout(sliderGrid) self.grid.addWidget(frame, 3, column) # Create the label at the top of the frame label = QLabel(title, alignment = Qt.AlignCenter) sliderGrid.addWidget(label, 0, 0, 1, 2) # Add the slider slider = Slider(Qt.Vertical, minimum = 0, maximum = len(labels)-1, tickInterval = 1, pageStep = 1, invertedAppearance = True) slider.valueChanged.connect(callback) slider.setTickPosition(QSlider.TicksLeft) slider.setTracking(False) sliderGrid.addWidget(slider, 1, 1, len(labels), 1) # Add the text labels to the left of the slider tickLabels = [] for i,text in enumerate(labels): label = SliderLabel(text, slider, i) tickLabels.append(label) sliderGrid.addWidget(label, i+1, 0) # Return the labels and slider so we can access them later return tickLabels, slider def createButtons(self, title, buttonTexts, tooltips, callbacks): """ Description: Creates the frames with a title at the top and groups of buttons. The tooltips and callbacks are applied to the buttons. This method is designed to allow any number of buttons to be passed in and it will arrange it in a 2xN fashion. Input: title: A label to place at the top of the frame which prodives a title for the group of buttons. 
buttonTexts: A list of the set of texts to for each button. tooltips: A list of tooltips to assign to each button. callbacks: The list of callbacks to attribute to each button. """ # Define or update the row of the top-level grid to # put this button frame into if not hasattr(self, 'row'): self.row = 4 else: self.row += 1 # Create the frame and put it in the top layer grid frame = QFrame(frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0) buttonGrid = QGridLayout(margin = 2, spacing = 2) frame.setLayout(buttonGrid) self.grid.addWidget(frame, self.row, 0, 1, 3) # Add the label which acts as the title of the button frame label = QLabel(title, alignment = Qt.AlignCenter) buttonGrid.addWidget(label, 0, 0, 1, 2) # Add the buttons. This is done by putting the buttons in rows # with two buttons per row until the loop has run out of buttons. # Each button is assigned its callback method. The button variable # itself is not saved as it is not needed. for i,(text,tt,cb) in enumerate(zip(buttonTexts, tooltips, callbacks)): button = QPushButton(text) button.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed) button.setToolTip(tt) button.clicked.connect(cb) buttonGrid.addWidget(button, i//2+1, i%2) def createDistanceMetric(self): # Create the frame frame = QFrame(frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0) frame.setToolTip('These metrics indicate how well the current\n' 'template matches the spectrum. The lower\n' 'the value, the better the match.\n\n' 'The first metric is the residual between the input spectrum and the selected template.\n' 'The second metric is the residual between the input spectrum and the selected template, normalized by error.\n' 'The third metric is the distance between the measured line of the input spectrum and the selected template.\n') distGrid = QVBoxLayout(margin = 2, spacing = 2) frame.setLayout(distGrid) self.grid.addWidget(frame, self.row + 1, 0, 1, 3) # Create the distance metric label label = QLabel('Metrics: Resid | NormResid | LineDist', alignment = Qt.AlignCenter) distGrid.addWidget(label) # Create the distance metric value label label = QLabel('0.0', alignment = Qt.AlignCenter) distGrid.addWidget(label) return label ### # Plot Creation Method # def updatePlot(self): """ Description: This is the method which handles all the plotting on the matplotlib plot. It will plot the template (if it exists), the user's spectrum and do things like control the zoom level on the plot. """ # Before updating the plot, check the current axis limits. If they're # set to the full limit values, then the plot wasn't zoomed in on when # they moved to a new plot. If the limits are different, they've zoomed # in and we should store the current plot limits so we can set them # to these limits at the end. if self.full_xlim is not None and self.full_ylim is not None: if (self.full_xlim == plt.gca().get_xlim() and self.full_ylim == plt.gca().get_ylim()): self.zoomed = False else: self.zoomed = True self.zoomed_xlim = plt.gca().get_xlim() self.zoomed_ylim = plt.gca().get_ylim() # *** Define Initial Figure *** with warnings.catch_warnings(): warnings.filterwarnings('ignore') # Ignore depreciation warnings plt.cla() ax = self.figure.add_subplot(111) #self.cursor = Cursor(ax, color = '#C8D2DC', lw = 0.5) # THIS BREAKS THE PLOT! 
if self.toolbar._active != 'ZOOM': # Make it so the zoom button is selected by default self.toolbar.zoom() # *** Plot the template *** # Determine which, if any, template file to load templateFile = self.getTemplateFile() if templateFile is not None: # Load in template data with warnings.catch_warnings(): # Ignore a very particular warning from some versions of astropy.io.fits # that is a known bug and causes no problems with loading fits data. warnings.filterwarnings('ignore', message = 'Could not find appropriate MS Visual C Runtime ') hdulist = fits.open(templateFile) lam = np.power(10,hdulist[1].data['loglam'][::10]) # Downsample the templates to save on time flux = hdulist[1].data['flux'][::10] std = hdulist[1].data['std'][::10] # The templates are all normalized to 8000 Angstroms. The loaded spectrum # are normalized to this by default as well, but if they're not defined at 8000 Angstroms, # it is normalized to a different value that the template needs to be normalized to if self.specObj.normWavelength != 8000: flux = Spectrum.normalize(lam, self.specObj.normWavelength, flux) # Plot template error bars and spectrum line ax.plot(lam, flux, '-k', label = 'Template') if self.showTemplateError.isChecked(): # Only plot template error if option is selected to do so ax.fill_between(lam, flux+std, flux-std, color = 'b', edgecolor = 'None', alpha = 0.1, label = 'Template RMS') # Determine and format the template name for the title, from the filename templateName = os.path.basename(os.path.splitext(templateFile)[0]) if '_' in templateName: templateName = templateName.replace('Dwarf','') #hide Dwarf from plot title ii = templateName.find('_')+1 # Index of first underscore, before metallicity templateName = templateName[:ii] + '[Fe/H] = ' + templateName[ii:] templateName = templateName.replace('_',',\;') else: # No template exists, plot nothing templateName = 'Not\;Available' # *** Plot the user's data *** # Get the flux and fix it as the user requested if self.smoothSpectrum.isChecked(): flux = self.specObj.smoothFlux else: flux = self.specObj.flux if self.removeStitchSpike.isChecked(): flux = Spectrum.removeSdssStitchSpike(self.specObj.wavelength, flux) # Plot it all up and define the title name ax.plot(self.specObj.wavelength, flux, '-r', alpha = 0.75, label = 'Your Spectrum') spectraName = os.path.basename(os.path.splitext(self.outData[self.specIndex,0])[0]) # *** Set Plot Labels *** ax.set_xlabel(r'$\mathrm{Wavelength\;[\AA]}$', fontsize = 16) ax.set_ylabel(r'$\mathrm{Normalized\;Flux}$', fontsize = 16) ax.set_title(r'$\mathrm{Template:\;' + templateName + '}$\n$\mathrm{Spectrum:\;' + spectraName.replace('_','\_') + '}$', fontsize = 16) # *** Set Legend Settings *** handles, labels = ax.get_legend_handles_labels() # In matplotlib versions before 1.5, the fill_between plot command above # does not appear in the legend. In those cases, we will fake it out by # putting in a fake legend entry to match the fill_between plot. 
if pltVersion < '1.5' and self.showTemplateError.isChecked() and templateFile is not None: labels.append('Template RMS') handles.append(Rectangle((0,0),0,0, color = 'b', ec = 'None', alpha = 0.1)) labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0])) leg = ax.legend(handles, labels, loc = 0) leg.get_frame().set_alpha(0) # Set legend text colors to match plot line colors if templateFile is not None: # Don't adjust the template error if it wasn't plotted if self.showTemplateError.isChecked(): plt.setp(leg.get_texts()[1], color = 'b', alpha = 0.5) # Adjust template error, alpha is higher to make more readable plt.setp(leg.get_texts()[2], color = 'r', alpha = 0.75) # Adjust spectrum label else: plt.setp(leg.get_texts()[1], color = 'r', alpha = 0.75) # Adjust spectrum table else: plt.setp(leg.get_texts()[0], color = 'r', alpha = 0.6) # *** Set Plot Spacing *** plt.subplots_adjust(left = 0.143, right = 0.927, top = 0.816, bottom = 0.194) # *** Set Plot Limits *** ax.set_xlim([3500, 10500]) # Set x axis limits to constant value self.toolbar.update() # Clears out view stack self.full_xlim = plt.gca().get_xlim() # Pull out default, current x-axis limit self.full_ylim = plt.gca().get_ylim() # Pull out default, current y-axis limit self.toolbar.push_current() # Push the current full zoom level to the view stack if self.zoomed: # If the previous plot was zoomed in, we should zoom this too plt.xlim(self.zoomed_xlim) # Set to the previous plot's zoom level plt.ylim(self.zoomed_ylim) # Set to the previous plot's zoom level self.toolbar.push_current() # Push the current, zoomed level to the view stack so it shows up first # *** Calc and update the template match metric text *** if templateFile is not None: m = min(len(self.specObj.wavelength), len(hdulist[1].data['loglam'])) dist = round(np.sqrt(np.nansum([(t-s)**2 for t,s in zip(hdulist[1].data['flux'][:m],self.specObj.flux[:m])])),2) distSTR = '{:.3f}'.format(dist) distNorm = round(np.sqrt(np.nansum([((t-s)/err)**2 for t,s,err in zip(hdulist[1].data['flux'][:m],self.specObj.flux[:m],self.specObj.var[:m]**0.5)])),2) distNormSTR = '{:.3f}'.format(distNorm) self.specDist = dist self.specDistNorm = distNorm self.lineCHiSq = self.specObj.distance self.distMetric.setText(distSTR+" | "+distNormSTR+" | "+'{:.5e}'.format(self.specObj.distance)) else: distSTR = 'None' distNormSTR = 'None' self.distMetric.setText(distSTR+" | "+distNormSTR+" | "+"None") # *** Draw the Plot *** self.canvas.draw() ### # Menu Item Callback Methods # # No menu item callbacks exist currently. ### # Main GUI Component Callback Methods # def spectrumChosen(self, val): """ Description: Fires when the user chooses a new spectrum from the dropdown list of all available spectra. 
""" self.outputUserType() self.specIndex = val # Update the current spectrum index self.loadUserSpectrum() # Load the new spectrum self.updatePlot() # Update the plot with the new spectrum def specTypeChanged(self, val): """ Description: Fires when the the spectrum type slider has been changed either by the user or programmatically by the GUI """ self.checkSliderStates() self.updatePlot() def specSubtypeChanged(self, val): """ Description: Fires when the the spectrum subtype slider has been changed either by the user or programmatically by the GUI """ self.checkSliderStates() self.updatePlot() def metallicityChanged(self, val): """ Description: Fires when the the metallicity type slider has been changed either by the user or programmatically by the GUI """ self.updatePlot() def earlierCallback(self): """ Description: Fires when the earlier button is pressed (or the associated keyboard shortcut is used). Moves the template to an earlier spectral type and updates the plot. """ curSpec = self.specTypeSlider.sliderPosition() curSub = self.subTypeSlider.sliderPosition() # If user hasn't selected "O" spectral type and they're # currently selected zero sub type, we need to loop around # to the previous spectral type if curSpec != 0 and curSub == 0: # Set the sub spectral type, skipping over K8 and K9 # since they don't exist. self.updateSlider(self.subTypeSlider, 7 if curSpec == 6 else 9) # Decrease the spectral type self.updateSlider(self.specTypeSlider, curSpec - 1) else: # Just decrease sub spectral type self.updateSlider(self.subTypeSlider, curSub - 1) # Now check our current slider states to make sure # they're valid and then update the plot. self.checkSliderStates() self.updatePlot() def laterCallback(self): """ Description: Fires when the later button is pressed (or the associated keyboard shortcut is used). Moves the template to a later spectral type and updates the plot. """ curSpec = self.specTypeSlider.sliderPosition() curSub = self.subTypeSlider.sliderPosition() # If the user hasn't selected "L" spectral type and # they're currently selecting "9" spectral sub type # (or 7 if spec type is "K"), we need to loop around # to the next spectral type if curSpec != 7 and (curSub == 9 or (curSpec == 5 and curSub == 7)): self.updateSlider(self.specTypeSlider, curSpec + 1) self.updateSlider(self.subTypeSlider, 0) else: # Just increase the sub spectral type self.updateSlider(self.subTypeSlider, curSub + 1) # Now check our current slider states to make sure # they're valid and then update the plot. self.checkSliderStates() self.updatePlot() def lowerMetalCallback(self): """ Description: Fires when the later metallicity button is pressed (or the associated keyboard shortcut is used). Moves the template to a lower metallicity (if possible) and updates the plot. """ curMetal = self.metalSlider.sliderPosition() if curMetal != 0: self.updateSlider(self.metalSlider, curMetal - 1) self.updatePlot() def higherMetalCallback(self): """ Description: Fires when the higher metallicity button is pressed (or the associated keyboard shortcut is used). Moves the template to a higher metallicity (if possible) and updates the plot. """ curMetal = self.metalSlider.sliderPosition() if curMetal != 6: self.updateSlider(self.metalSlider, curMetal + 1) self.updatePlot() def oddCallback(self): """ Description: Fires when the odd button is pressed (or the associated keyboard shortcut is used). Brings up the Odd GUI by using a class from the gui_utils to allow the user to select a non- standard spectrum choice. 
""" option = OptionWindow(self, ['CV', 'Gal', 'Unknown'], instruction = 'Pick an odd type') if option.choice is not None: # Store the user's respose in the outData self.outData[self.specIndex, 5] = option.choice self.outData[self.specIndex, 6] = 'nan' # Move to the next spectrum self.moveToNextSpectrum() def badCallback(self): """ Description: Fires when the bad button is pressed (or the associated keyboard shortcut is used). Simply sets the spectrum choice as "BAD" and moves on to the next spectrum. """ # Store BAD as the user's choices self.outData[self.specIndex, 5] = 'BAD' self.outData[self.specIndex, 6] = 'BAD' # Move to the next spectra self.moveToNextSpectrum() def previousCallback(self): """ Description: Fires when the previous button is pressed (or the associated keyboard shortcut is used). Moves back to the previous user's spectrum. """ self.moveToPreviousSpectrum() def nextCallback(self): """ Description: Fires when the next button is pressed (or the associated keyboard shortcut is used). Stores the current spectrum's choice and moves forward to the next user's spectrum. """ # Store the choice for the current spectra # Move to the next spectra self.outputUserType() self.moveToNextSpectrum() def outputUserType(self): specState1 = self.specTypeSlider.sliderPosition() subState1 = self.subTypeSlider.sliderPosition() specState2 = self.specTypeSliderSB2.sliderPosition() subState2 = self.subTypeSliderSB2.sliderPosition() if self.SB2button.isChecked() == True: if self.specTypeSlider.sliderPosition() == 8: self.outData[self.specIndex,5] = "dC" + self.subTypeC[subState1] + "+" + self.specType[specState2] + self.subTypeWD[subState2] elif self.specTypeSliderSB2.sliderPosition() == 8: self.outData[self.specIndex,5] = self.specType[specState1] + str(subState1) + "+dC" + self.subTypeC[subState2] elif self.specTypeSliderSB2.sliderPosition() == 9: self.outData[self.specIndex,5] = self.specType[specState1] + str(subState1) + "+DA" + self.subTypeWD[subState2] else: self.outData[self.specIndex,5] = self.specType[self.specTypeSlider.sliderPosition()] + str(self.subTypeSlider.sliderPosition()) + "+" + self.specType[self.specTypeSliderSB2.sliderPosition()] + str(self.subTypeSliderSB2.sliderPosition()) self.outData[self.specIndex,6] = 'nan' else: if self.specTypeSlider.sliderPosition() == 8: self.outData[self.specIndex,5] = "dC" + self.subTypeC[subState1] self.outData[self.specIndex,6] = 'nan' elif self.specTypeSlider.sliderPosition() == 9: self.outData[self.specIndex,5] = "DA" + self.subTypeWD[subState1] self.outData[self.specIndex,6] = 'nan' else: self.outData[self.specIndex,5] = self.specType[self.specTypeSlider.sliderPosition()] + str(self.subTypeSlider.sliderPosition()) self.outData[self.specIndex,6] = self.metalType[self.metalSlider.sliderPosition()] def callback_hammer_time(self): timeCalled = time() if self.pPressTime == 0 or timeCalled - self.pPressTime > 1.5: # Reset self.pPressTime = timeCalled self.pPressNum = 1 return else: self.pPressNum += 1 if self.pPressNum == 5: chrList = [(10,1),(32,18),(46,1),(39,1),(47,1),(32,26),(10,1),(32,1),(42,1),(32,3),(39,1),(42,1),(32,10),(47,1),(32,1),(40,1),(95,11), (46,1),(45,12),(46,1),(32,2),(10,1),(32,9),(42,1),(32,7),(91,1),(32,1),(93,1),(95,11),(124,1),(47,2),(80,1),(121,1),(72,1), (97,1),(109,2),(101,1),(114,1),(47,2),(124,1),(32,2),(10,1),(32,14),(42,1),(32,2),(41,1),(32,1),(40,1),(32,11),(39,1),(45,12), (39,1),(32,2),(10,1),(32,17),(39,1),(45,1),(39,1),(32,1),(42,1),(32,25),(10,1),(32,13),(42,1),(32,33),(10,1),(32,19),(42,1),(32,27)] 
MessageBox(self, ''.join([chr(c[0])*c[1] for c in chrList]), fontFamily = 'courier') ### # Utility Methods # def checkSliderStates(self): """ Description: This method is used whenever the spectrum type sliders are updated. It will ensure that the user cannot choose certain spectrum types because they are invalid. """ # Check to see if the spectral slider is on the K type. # If it is, turn off the option to pick K8 and K9 # since those don't exist. Otherwise, just turn those # sub spectral type labels on. # self.specTypeSlider.blockSignals(True) # self.specTypeSliderSB2.blockSignals(True) self.subTypeSlider.blockSignals(True) self.subTypeSliderSB2.blockSignals(True) bad_primary = [0, 1, 7, 9] bad_secondary = [0, 1, 2, 7] if self.SB2button.isChecked() == True: # for ii in range(len(self.specTypeLabelSB2)): # self.specTypeLabelSB2[ii].setDisabled(False) # self.subTypeLabelSB2[ii].setDisabled(False) # for ii in bad_primary: # self.specTypeLabel[ii].setDisabled(True) # for ii in bad_secondary: # self.specTypeLabelSB2[ii].setDisabled(True) if self.specTypeSlider.sliderPosition() in [0,1]: self.updateSlider(self.specTypeSlider, 2) elif self.specTypeSlider.sliderPosition() in [7 ,9]: self.updateSlider(self.specTypeSlider, 8) if self.specTypeSliderSB2.sliderPosition() in [0,1,2]: self.updateSlider(self.specTypeSliderSB2, 3) elif self.specTypeSliderSB2.sliderPosition() in [7]: self.updateSlider(self.specTypeSliderSB2, 6) self.checkSB2combos() else: # for ii in range(len(self.specTypeLabelSB2)): # self.specTypeLabelSB2[ii].setDisabled(True) # self.subTypeLabelSB2[ii].setDisabled(True) # for ii in range(len(self.specTypeLabel)): # self.specTypeLabel[ii].setDisabled(False) # self.subTypeLabel[ii].setDisabled(False) if self.specTypeSlider.sliderPosition() == 5: self.subTypeLabel[-1].setDisabled(True) self.subTypeLabel[-2].setDisabled(True) if self.subTypeSlider.sliderPosition() in [8,9]: self.updateSlider(self.subTypeSlider, 7) elif self.specTypeSlider.sliderPosition() == 8: bad_dC_types = [3, 4, 5, 6, 7, 8, 9] for ii in bad_dC_types: self.subTypeLabel[ii].setDisabled(True) if self.subTypeSlider.sliderPosition() in bad_dC_types: self.updateSlider(self.subTypeSlider, 2) else: for ii in range(10): self.subTypeLabel[ii].setEnabled(True) self.updateSliderLabel() self.specTypeSlider.blockSignals(False) self.specTypeSliderSB2.blockSignals(False) self.subTypeSlider.blockSignals(False) self.subTypeSliderSB2.blockSignals(False) def limitSliders(self, good_PrimarySubtypes, good_Secondary_Types): this_good_PrimarySubtypes = good_PrimarySubtypes[self.specTypeSlider.sliderPosition()] this_good_Secondary_Types = good_Secondary_Types[self.specTypeSlider.sliderPosition()] notfound_good_SecondarySubtypes = True primaryType_index = self.specTypeSlider.sliderPosition() primarySubType_index = self.subTypeSlider.sliderPosition() secondaryType_index = self.specTypeSliderSB2.sliderPosition() #counter = 0 while notfound_good_SecondarySubtypes: #print(counter) good_SecondarySubtypes = self.specObj._splitSB2spectypes[np.where((self.specObj._splitSB2spectypes[:,0] == self.specObj.letterSpt[primaryType_index]) & (self.specObj._splitSB2spectypes[:,2] == self.specObj.letterSpt[secondaryType_index]) & (self.specObj._splitSB2spectypes[:,1] == str(primarySubType_index)))[0], 3] # .astype(int).tolist() if (secondaryType_index == 9) or (secondaryType_index == -1): # print(primaryType_index, primarySubType_index, secondaryType_index, good_SecondarySubtypes, self.subTypeWD) good_SecondarySubtypes = [int(np.where(np.array(self.subTypeWD) == 
ii)[0][0]) for ii in good_SecondarySubtypes if ii is not None] else: # print(primaryType_index, primarySubType_index, secondaryType_index, good_SecondarySubtypes) good_SecondarySubtypes = good_SecondarySubtypes.astype(int).tolist() if not good_SecondarySubtypes: #print(good_SecondarySubtypes) #rint("not if") #primaryType_index = #primarySubType_index = secondaryType_index = secondaryType_index - 1 if secondaryType_index > -9: self.updateSlider(self.subTypeSlider, np.arange(10)[primarySubType_index]) self.updateSlider(self.specTypeSliderSB2, np.arange(10)[secondaryType_index]) else: secondaryType_index = self.specTypeSliderSB2.sliderPosition() primarySubType_index = primarySubType_index - 1 if primarySubType_index < -10: primarySubType_index = 9 if good_SecondarySubtypes: #print(good_SecondarySubtypes) #print("if") notfound_good_SecondarySubtypes = False #counter += 1 #print(good_SecondarySubtypes) #print(self.specTypeSlider.sliderPosition()) #print(self.subTypeSlider.sliderPosition()) #print(self.specTypeSliderSB2.sliderPosition()) for ii in range(len(self.subTypeLabel)): self.subTypeLabel[ii].setDisabled(True) for ii in range(len(self.specTypeLabelSB2)): self.specTypeLabelSB2[ii].setDisabled(True) for ii in range(len(self.subTypeLabelSB2)): self.subTypeLabelSB2[ii].setDisabled(True) for ii in this_good_PrimarySubtypes: self.subTypeLabel[ii].setDisabled(False) for ii in this_good_Secondary_Types: self.specTypeLabelSB2[ii].setDisabled(False) if self.subTypeSlider.sliderPosition() not in this_good_PrimarySubtypes: self.updateSlider(self.subTypeSlider, this_good_PrimarySubtypes[np.argmin(np.abs(np.array(this_good_PrimarySubtypes) - self.subTypeSlider.sliderPosition()))]) if self.specTypeSliderSB2.sliderPosition() not in this_good_Secondary_Types: self.updateSlider(self.specTypeSliderSB2, this_good_Secondary_Types[np.argmin(np.abs(np.array(this_good_Secondary_Types) - self.specTypeSliderSB2.sliderPosition()))]) for ii in good_SecondarySubtypes: self.subTypeLabelSB2[ii].setDisabled(False) if self.subTypeSliderSB2.sliderPosition() not in good_SecondarySubtypes: self.updateSlider(self.subTypeSliderSB2, good_SecondarySubtypes[np.argmin(np.abs(np.array(good_SecondarySubtypes) - self.subTypeSliderSB2.sliderPosition()))]) def checkSB2combos(self): good_PrimarySubtypes = [[], #O [], #B [2, 3, 5, 7], #A [0, 2, 5, 6, 8], #F [0, 1, 3, 4, 5, 6, 7, 8, 9], #G [0, 1, 2, 3, 4, 5, 7], #K [0, 1, 2, 3, 4, 5, 6, 7], #M [], #L [0, 1, 2], #C [] #WD ] good_Secondary_Types = [[], #O [], #B [3], #A [4, 5], #F [5, 8, 9], #G [6, 8, 9], #K [8, 9], #M [], #L [9], #C [] #WD ] self.limitSliders(good_PrimarySubtypes, good_Secondary_Types) def moveToNextSpectrum(self): """ Description: This method handles moving to the next spectrum. All it really does is determines if the user is at the end of the list of spectrum, and, if so, asks if they're done. If they aren't at the end, it moves to the next spectrum (by incrementing self.specIndex) and calling self.loadUserSpectrum. """ if self.specIndex+1 >= len(self.outData): QApplication.beep() # Use the system beep to notify the user msg = MessageBox(self, "You've classified all the spectra. Are you finished?", buttons = QMessageBox.Yes|QMessageBox.No) if msg.reply == QMessageBox.Yes: self._cleanUpAndClose() else: self.specIndex += 1 self.loadUserSpectrum() self.updatePlot() def moveToPreviousSpectrum(self): """ Description: This method handles moving to the previous spectrum. 
It will simply decrement the self.specIndex variable if they're not already on the first index and call self.loadUserSpectrum. """ if self.specIndex > 0: self.specIndex -= 1 self.loadUserSpectrum() self.updatePlot() def splitSpecType(self, s): # head = s.rstrip('0123456789') # tail = s[len(head):] if 'dC' in s: head = 'dC' tail = s[-1] else: head, tail, _ = re.split('(\d.*)', s) return head, tail def loadUserSpectrum(self): """ Description: This handles loading a new spectrum based on the self.specIndex variable and updates the GUI components accordingly """ # Read in the spectrum file indicated by self.specIndex fname = self.outData[self.specIndex,0] ftype = self.outData[self.specIndex,1] self.specObj.readFile(self.options['spectraPath']+fname, ftype) # Ignore returned values self.specObj.normalizeFlux() self.specObj.guessSpecType() # Set the spectrum entry field to the new spectrum name self.spectrumList.setCurrentIndex(self.specIndex) # Set the sliders to the new spectrum's auto-classified choices auto_classified_choice = self.outData[self.specIndex,3] if '+' in auto_classified_choice: # IF SB2 user_type1, user_type2 = auto_classified_choice.replace("+"," ").replace("."," ").split()[:2] user_mainType1, user_subtype1 = self.splitSpecType(user_type1) user_mainType2, user_subtype2 = self.splitSpecType(user_type2) if 'dC' in user_mainType1: self.updateSlider(self.specTypeSlider, self.specType.index(user_mainType1)) self.updateSlider(self.subTypeSlider, self.subTypeC.index(user_subtype1)) self.updateSlider(self.specTypeSliderSB2, self.specType.index(user_mainType2)) self.updateSlider(self.subTypeSliderSB2, self.subType.index(user_subtype2)) elif 'dC' in user_mainType2: self.updateSlider(self.specTypeSlider, self.specType.index(user_mainType1)) self.updateSlider(self.subTypeSlider, self.subType.index(user_subtype1)) self.updateSlider(self.specTypeSliderSB2, self.specType.index(user_mainType2)) self.updateSlider(self.subTypeSliderSB2, self.subTypeC.index(user_subtype2)) else: self.updateSlider(self.specTypeSlider, self.specType.index(user_mainType1)) self.updateSlider(self.subTypeSlider, self.subType.index(user_subtype1)) self.updateSlider(self.specTypeSliderSB2, self.specType.index(user_mainType2)) self.updateSlider(self.subTypeSliderSB2, self.subType.index(user_subtype2)) self.SB2button.setChecked(True) self.checkSliderStates() else:# IF Single star specTypePostSplit, specSubTypePostSplit = self.splitSpecType(auto_classified_choice) self.updateSlider(self.specTypeSlider, self.specType.index(specTypePostSplit)) if specTypePostSplit == 'DA': self.updateSlider(self.subTypeSlider, self.subTypeWD.index(specSubTypePostSplit)) elif specTypePostSplit == 'dC': self.updateSlider(self.subTypeSlider, self.subTypeC.index(specSubTypePostSplit)) else: self.updateSlider(self.subTypeSlider, self.subType.index(specSubTypePostSplit)) self.updateSlider(self.metalSlider, self.metalType.index(self.outData[self.specIndex,4])) self.SB2button.setChecked(False) # Reset the indicator for whether the plot is zoomed. It should only stay zoomed # between loading templates, not between switching spectra. 
self.full_xlim = None self.full_ylim = None self.zoomed = False # Reset the smooth state to be unsmoothed, unless the user chose to lock the state if not self.lockSmoothState.isChecked(): self.smoothSpectrum.setChecked(False) def getTemplateFile(self, specState1 = None, subState1 = None, metalState = None, specState2 = None, subState2 = None): """ Description: This will determine the filename for the template which matches the current template selection. Either that selection will come from current slider positions, or else from input to this function. This will search for filenames matching a specific format. The first attempt will be to look for a filename of the format "SS_+M.M_Dwarf.fits", where the SS is the spectral type and subtype and +/-M.M is the [Fe/H] metallicity. The next next format it will try (if the first doesn't exist) is "SS_+M.M.fits". After that it will try "SS.fits". """ # If values weren't passed in for certain states, assume we should # use what is chosen on the GUI sliders if self.SB2button.isChecked() == True: if specState1 is None: specState1 = self.specTypeSlider.sliderPosition() if subState1 is None: subState1 = self.subTypeSlider.sliderPosition() if specState2 is None: specState2 = self.specTypeSliderSB2.sliderPosition() if subState2 is None: subState2 = self.subTypeSliderSB2.sliderPosition() if (specState1 == 8): #if SB2 with dC primary filename = "dC" + self.subTypeC[subState1] + "+" + self.specType[specState2] + self.subTypeWD[subState2] fullPath = os.path.join(self.SB2templateDir, filename + '.fits') if os.path.isfile(fullPath): return fullPath if (specState2 == 8): #if SB2 with dC primary filename = self.specType[specState1] + str(subState1) + "+dC" + self.subTypeC[subState2] fullPath = os.path.join(self.SB2templateDir, filename + '.fits') if os.path.isfile(fullPath): return fullPath if (specState2 == 9): # USE WD DA#.#.fits filename filename = self.specType[specState1] + str(subState1) + "+DA" + self.subTypeWD[subState2] fullPath = os.path.join(self.SB2templateDir, filename + '.fits') if os.path.isfile(fullPath): return fullPath filename = self.specType[specState1] + str(subState1) + "+" + self.specType[specState2] + str(subState2) fullPath = os.path.join(self.SB2templateDir, filename + '.fits') return fullPath else: if specState1 is None: specState1 = self.specTypeSlider.sliderPosition() if subState1 is None: subState1 = self.subTypeSlider.sliderPosition() if metalState is None: metalState = self.metalSlider.sliderPosition() if (specState1 == 9): # USE WD DA#.#.fits filename filename = "DA" + self.subTypeWD[subState1] fullPath = os.path.join(self.templateDir, filename + '.fits') if os.path.isfile(fullPath): return fullPath if (specState1 == 8): # USE dC#.fits filename filename = "dC" + self.subTypeC[subState1] fullPath = os.path.join(self.templateDir, filename + '.fits') if os.path.isfile(fullPath): return fullPath # Try using the full name, i.e., SS_+M.M_Dwarf.fits filename = self.specType[specState1] + str(subState1) + '_' + self.metalType[metalState] + '_Dwarf' fullPath = os.path.join(self.templateDir, filename + '.fits') if os.path.isfile(fullPath): return fullPath # Try using only the spectra and metallicity in the name, i.e., SS_+M.M.fits filename = filename[:7] fullPath = os.path.join(self.templateDir, filename + '.fits') if os.path.isfile(fullPath): return fullPath # Try to use just the spectral type, i.e., SS.fits #filename = filename[:2] filename = self.specType[specState1] + str(subState1) fullPath = os.path.join(self.templateDir, filename + 
            '.fits')
        if os.path.isfile(fullPath): return fullPath

        # Return None if file could not be found
        return None

    def updateSlider(self, slider, value):
        slider.blockSignals(True)
        slider.setValue(value)
        slider.setSliderPosition(value)
        slider.blockSignals(False)

    def updateSliderLabel(self):

        if self.SB2button.isChecked() == True:
            for ii, label in enumerate(self.subType):
                self.subTypeLabel[ii].setText(label)
            if self.specTypeSlider.sliderPosition() == 8:
                for ii, label in enumerate(self.subTypeC):
                    self.subTypeLabel[ii].setText(label)

            if self.specTypeSliderSB2.sliderPosition() == 9: # WD change subtype labels to the Sion+1983 convention
                for ii, label in enumerate(self.subTypeWD):
                    self.subTypeLabelSB2[ii].setText(label)
            elif self.specTypeSliderSB2.sliderPosition() == 8:
                for ii, label in enumerate(self.subTypeC):
                    self.subTypeLabelSB2[ii].setText(label)
            else:
                for ii, label in enumerate(self.subType):
                    self.subTypeLabelSB2[ii].setText(label)
        else:
            if self.specTypeSlider.sliderPosition() == 9: # WD change subtype labels to the Sion+1983 convention
                for ii, label in enumerate(self.subTypeWD):
                    self.subTypeLabel[ii].setText(label)
            elif self.specTypeSlider.sliderPosition() == 8:
                for ii, label in enumerate(self.subTypeC):
                    self.subTypeLabel[ii].setText(label)
            else:
                for ii, label in enumerate(self.subType):
                    self.subTypeLabel[ii].setText(label)

        # for ii, label in enumerate(text):
        #     sliderLabel[ii].setText(label)

    def closeEvent(self, event):
        """
        Description:
            This method is the window's closeEvent method and is
            here to override the QMainWindow closeEvent method.
            Effectively, if the user wants to quit the program
            and they trigger the close event, this will catch
            that event and ask them if they're sure they want
            to close first. If they respond with yes, this will
            run the _writeOutDataToFile function which saves the
            data the user has produced and then calls the parent
            close method to handle the actual close process. If
            they respond no, it just ignores the close event and
            continues running the GUI.
        """
        # Ask the user if they're sure they actually want to quit
        msg = MessageBox(self, 'Are you sure you want to exit PyHammer?', title = 'Quit',
                         buttons = QMessageBox.Yes|QMessageBox.No)

        if msg.reply == QMessageBox.Yes:
            self._writeOutDataToFile()
            event.accept()
        else:
            event.ignore()

    def _cleanUpAndClose(self):
        """
        Description:
            This method is one of the available close methods of
            the GUI. It simply performs the process of outputting
            the spectra data to the output file and quitting.
            Note that the quitting process here force quits the
            entire QApplication, not just this QMainWindow object.
            This is because we don't want to trigger the
            closeEvent method of the QMainWindow object.
        """
        self._writeOutDataToFile()
        qApp.quit()

    def _writeOutDataToFile(self):
        """
        Description:
            Writes all the output data to the output file. This
            method is used before any quit procedures to save
            the data recorded by the user before closing the GUI.
        """
        with open(self.options['outfile'], 'w') as outfile:
            outfile.write('#Filename,File Type,Radial Velocity (km/s),Guessed Spectral Type,Guessed [Fe/H],User Spectral Type,User [Fe/H]\n')
            for i, spectra in enumerate(self.outData):
                for j, col in enumerate(spectra):
                    outfile.write(col)
                    if j < 6: outfile.write(',')
                if i < len(self.outData)-1: outfile.write('\n')
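# A minimal illustration (not part of the original file) of the CSV that
# _writeOutDataToFile produces, assuming a hypothetical spectrum named
# "example_spectrum.fits" with a hypothetical file type string "fits":
# after the header line
#   #Filename,File Type,Radial Velocity (km/s),Guessed Spectral Type,Guessed [Fe/H],User Spectral Type,User [Fe/H]
# each spectrum contributes one comma-separated row, e.g.
#   example_spectrum.fits,fits,12.3,K5,-0.5,K7,-1.0
# where the last two fields hold the eye-checked spectral type and
# metallicity chosen in this GUI (the numeric values above are made up).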
BU-hammerTeam/PyHammer
eyecheck.py
Python
mit
70,235
[ "Galaxy" ]
e9a29c4d8095b1c6363b8b5f2f4a4eff5b06f6f7f5e44150d63fd2a2073355db
""" Routines for transforming FERC Form 1 data before loading into the PUDL DB. This module provides a variety of functions that are used in cleaning up the FERC Form 1 data prior to loading into our database. This includes adopting standardized units and column names, standardizing the formatting of some string values, and correcting data entry errors which we can infer based on the existing data. It may also include removing bad data, or replacing it with the appropriate NA values. """ import importlib.resources import logging import re from difflib import SequenceMatcher from typing import Dict, List # NetworkX is used to knit incomplete ferc plant time series together. import networkx as nx import numpy as np import pandas as pd from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.compose import ColumnTransformer from sklearn.feature_extraction.text import TfidfVectorizer # These modules are required for the FERC Form 1 Plant ID & Time Series from sklearn.metrics.pairwise import cosine_similarity from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler, Normalizer, OneHotEncoder import pudl from pudl.helpers import convert_cols_dtypes from pudl.metadata.classes import DataSource from pudl.metadata.dfs import FERC_DEPRECIATION_LINES logger = logging.getLogger(__name__) ############################################################################## # Dicts for categorizing freeform strings #################################### ############################################################################## FUEL_STRINGS: Dict[str, List[str]] = { "coal": [ 'coal', 'coal-subbit', 'lignite', 'coal(sb)', 'coal (sb)', 'coal-lignite', 'coke', 'coa', 'lignite/coal', 'coal - subbit', 'coal-subb', 'coal-sub', 'coal-lig', 'coal-sub bit', 'coals', 'ciak', 'petcoke', 'coal.oil', 'coal/gas', 'bit coal', 'coal-unit #3', 'coal-subbitum', 'coal tons', 'coal mcf', 'coal unit #3', 'pet. coke', 'coal-u3', 'coal&coke', 'tons', ], "oil": [ 'oil', '#6 oil', '#2 oil', 'fuel oil', 'jet', 'no. 2 oil', 'no.2 oil', 'no.6& used', 'used oil', 'oil-2', 'oil (#2)', 'diesel oil', 'residual oil', '# 2 oil', 'resid. oil', 'tall oil', 'oil/gas', 'no.6 oil', 'oil-fuel', 'oil-diesel', 'oil / gas', 'oil bbls', 'oil bls', 'no. 6 oil', '#1 kerosene', 'diesel', 'no. 2 oils', 'blend oil', '#2oil diesel', '#2 oil-diesel', '# 2 oil', 'light oil', 'heavy oil', 'gas.oil', '#2', '2', '6', 'bbl', 'no 2 oil', 'no 6 oil', '#1 oil', '#6', 'oil-kero', 'oil bbl', 'biofuel', 'no 2', 'kero', '#1 fuel oil', 'no. 2 oil', 'blended oil', 'no 2. oil', '# 6 oil', 'nno. 2 oil', '#2 fuel', 'oill', 'oils', 'gas/oil', 'no.2 oil gas', '#2 fuel oil', 'oli', 'oil (#6)', 'oil/diesel', '2 oil', '#6 hvy oil', 'jet fuel', 'diesel/compos', 'oil-8', 'oil {6}', 'oil-unit #1', 'bbl.', 'oil.', # noqa: FS003 'oil #6', 'oil (6)', 'oil(#2)', 'oil-unit1&2', 'oil-6', '#2 fue oil', 'dielel oil', 'dielsel oil', '#6 & used', 'barrels', 'oil un 1 & 2', 'jet oil', 'oil-u1&2', 'oiul', 'pil', 'oil - 2', '#6 & used', 'oial', 'diesel fuel', 'diesel/compo', 'oil (used)', ], "gas": [ 'gas', 'gass', 'methane', 'natural gas', 'blast gas', 'gas mcf', 'propane', 'prop', 'natural gas', 'nat.gas', 'nat gas', 'nat. gas', 'natl gas', 'ga', 'gas`', 'syngas', 'ng', 'mcf', 'blast gaa', 'nat gas', 'gac', 'syngass', 'prop.', 'natural', 'coal.gas', 'n. 
gas', 'lp gas', 'natuaral gas', 'coke gas', 'gas #2016', 'propane**', '* propane', 'propane **', 'gas expander', 'gas ct', '# 6 gas', '#6 gas', 'coke oven gas' ], "solar": [], "wind": [], "hydro": [], "nuclear": [ 'nuclear', 'grams of uran', 'grams of', 'grams of ura', 'grams', 'nucleur', 'nulear', 'nucl', 'nucleart', 'nucelar', 'gr.uranium', 'grams of urm', 'nuclear (9)', 'nulcear', 'nuc', 'gr. uranium', 'nuclear mw da', 'grams of ura', 'nucvlear', ], "waste": [ 'tires', 'tire', 'refuse', 'switchgrass', 'wood waste', 'woodchips', 'biomass', 'wood', 'wood chips', 'rdf', 'tires/refuse', 'tire refuse', 'waste oil', 'waste', 'woodships', 'tire chips', 'tdf', ], "other": [ 'steam', 'purch steam', 'all', 'n/a', 'purch. steam', 'other', 'composite', 'composit', 'mbtus', 'total', 'avg', 'avg.', 'blo', 'all fuel', 'comb.', 'alt. fuels', 'na', 'comb', '/#=2\x80â\x91?', 'kã\xadgv¸\x9d?', "mbtu's", 'gas, oil', 'rrm', '3\x9c', 'average', 'furfural', '0', 'watson bng', 'toal', 'bng', '# 6 & used', 'combined', 'blo bls', 'compsite', '*', 'compos.', 'gas / oil', 'mw days', 'g', 'c', 'lime', 'all fuels', 'at right', '20', '1', 'comp oil/gas', 'all fuels to', 'the right are', 'c omposite', 'all fuels are', 'total pr crk', 'all fuels =', 'total pc', 'comp', 'alternative', 'alt. fuel', 'bio fuel', 'total prairie', '', 'kã\xadgv¸?', 'm', 'waste heat', '/#=2â?', '3', ], } """ A mapping a canonical fuel name to a list of strings which are used to represent that fuel in the FERC Form 1 Reporting. Case is ignored, as all fuel strings are converted to a lower case in the data set. """ FUEL_UNIT_STRINGS: Dict[str, List[str]] = { "ton": [ 'toms', 'taons', 'tones', 'col-tons', 'toncoaleq', 'coal', 'tons coal eq', 'coal-tons', 'ton', 'tons', 'tons coal', 'coal-ton', 'tires-tons', 'coal tons -2 ', 'oil-tons', 'coal tons 200', 'ton-2000', 'coal tons', 'coal tons -2', 'coal-tone', 'tire-ton', 'tire-tons', 'ton coal eqv', 'tos', 'coal tons - 2', 'c. t.', 'c.t.', 'toncoalequiv', ], "mcf": [ 'mcf', "mcf's", 'mcfs', 'mcf.', 'gas mcf', '"gas" mcf', 'gas-mcf', 'mfc', 'mct', ' mcf', 'msfs', 'mlf', 'mscf', 'mci', 'mcl', 'mcg', 'm.cu.ft.', 'kcf', '(mcf)', 'mcf *(4)', 'mcf00', 'm.cu.ft..', '1000 c.f', ], "bbl": [ 'barrel', 'bbls', 'bbl', 'barrels', 'bbrl', 'bbl.', 'bbls.', 'oil 42 gal', 'oil-barrels', 'barrrels', 'bbl-42 gal', 'oil-barrel', 'bb.', 'barrells', 'bar', 'bbld', 'oil- barrel', 'barrels .', 'bbl .', 'barels', 'barrell', 'berrels', 'bb', 'bbl.s', 'oil-bbl', 'bls', 'bbl:', 'barrles', 'blb', 'propane-bbl', 'barriel', 'berriel', 'barrile', '(bbl.)', 'barrel *(4)', '(4) barrel', 'bbf', 'blb.', '(bbl)', 'bb1', 'bbsl', 'barrrel', 'barrels 100%', 'bsrrels', "bbl's", '*barrels', 'oil - barrels', 'oil 42 gal ba', 'bll', 'boiler barrel', 'gas barrel', '"boiler" barr', '"gas" barrel', '"boiler"barre', '"boiler barre', 'barrels .', 'bariel', 'brrels', 'oil barrel', 'barreks', 'oil-bbls', 'oil-bbs', ], "gal": ['gallons', 'gal.', 'gals', 'gals.', 'gallon', 'gal', 'galllons'], "kgal": [ 'oil(1000 gal)', 'oil(1000)', 'oil (1000)', 'oil(1000', 'oil(1000ga)', '1000 gals', '1000 gal', ], "gramsU": [ 'gram', 'grams', 'gm u', 'grams u235', 'grams u-235', 'grams of uran', 'grams: u-235', 'grams:u-235', 'grams:u235', 'grams u308', 'grams: u235', 'grams of', 'grams - n/a', 'gms uran', 's e uo2 grams', 'gms uranium', 'grams of urm', 'gms. of uran', 'grams (100%)', 'grams v-235', 'se uo2 grams', 'grams u', ], "kgU": [ 'kg of uranium', 'kg uranium', 'kilg. 
u-235', 'kg u-235', 'kilograms-u23', 'kg', 'kilograms u-2', 'kilograms', 'kg of', 'kg-u-235', 'kilgrams', 'kilogr. u235', 'uranium kg', 'kg uranium25', 'kilogr. u-235', 'kg uranium 25', 'kilgr. u-235', 'kguranium 25', 'kg-u235', 'kgm', ], "klbs": ['k lbs.', 'k lbs', '1000 / lbs', '1000 lbs', ], "mmbtu": [ 'mmbtu', 'mmbtus', 'mbtus', '(mmbtu)', "mmbtu's", 'nuclear-mmbtu', 'nuclear-mmbt', 'mmbtul', ], "btu": ['btus', 'btu', ], "mwdth": [ 'mwd therman', 'mw days-therm', 'mwd thrml', 'mwd thermal', 'mwd/mtu', 'mw days', 'mwdth', 'mwd', 'mw day', 'dth', 'mwdaysthermal', 'mw day therml', 'mw days thrml', 'nuclear mwd', 'mmwd', 'mw day/therml' 'mw days/therm', 'mw days (th', 'ermal)', ], "mwhth": [ 'mwh them', 'mwh threm', 'nwh therm', 'mwhth', 'mwh therm', 'mwh', 'mwh therms.', 'mwh term.uts', 'mwh thermal', 'mwh thermals', 'mw hr therm', 'mwh therma', 'mwh therm.uts', ], "unknown": [ '', '1265', 'mwh units', 'composite', 'therms', 'n/a', 'mbtu/kg', 'uranium 235', 'oil', 'ccf', '2261', 'uo2', '(7)', 'oil #2', 'oil #6', '\x99å\x83\x90?"', 'dekatherm', '0', 'mw day/therml', 'nuclear', 'gas', '62,679', 'mw days/therm', 'na', 'uranium', 'oil/gas', 'thermal', '(thermal)', 'se uo2', '181679', '83', '3070', '248', '273976', '747', '-', 'are total', 'pr. creek', 'decatherms', 'uramium', '.', 'total pr crk', '>>>>>>>>', 'all', 'total', 'alternative-t', 'oil-mcf', '3303671', '929', '7182175', '319', '1490442', '10881', '1363663', '7171', '1726497', '4783', '7800', '12559', '2398', 'creek fuels', 'propane-barre', '509', 'barrels/mcf', 'propane-bar', '4853325', '4069628', '1431536', '708903', 'mcf/oil (1000', '344', 'å?"', 'mcf / gallen', ], } """ A dictionary linking fuel units (keys) to lists of various strings representing those fuel units (values) """ PLANT_KIND_STRINGS: Dict[str, List[str]] = { "steam": [ 'coal', 'steam', 'steam units 1 2 3', 'steam units 4 5', 'steam fossil', 'steam turbine', 'steam a', 'steam 100', 'steam units 1 2 3', 'steams', 'steam 1', 'steam retired 2013', 'stream', 'steam units 1,2,3', 'steam units 4&5', 'steam units 4&6', 'steam conventional', 'unit total-steam', 'unit total steam', '*resp. share steam', 'resp. share steam', 'steam (see note 1,', 'steam (see note 3)', 'mpc 50%share steam', '40% share steam' 'steam (2)', 'steam (3)', 'steam (4)', 'steam (5)', 'steam (6)', 'steam (7)', 'steam (8)', 'steam units 1 and 2', 'steam units 3 and 4', 'steam (note 1)', 'steam (retired)', 'steam (leased)', 'coal-fired steam', 'oil-fired steam', 'steam/fossil', 'steam (a,b)', 'steam (a)', 'stean', 'steam-internal comb', 'steam (see notes)', 'steam units 4 & 6', 'resp share stm note3', 'mpc50% share steam', 'mpc40%share steam', 'steam - 64%', 'steam - 100%', 'steam (1) & (2)', 'resp share st note3', 'mpc 50% shares steam', 'steam-64%', 'steam-100%', 'steam (see note 1)', 'mpc 50% share steam', 'steam units 1, 2, 3', 'steam units 4, 5', 'steam (2)', 'steam (1)', 'steam 4, 5', 'steam - 72%', 'steam (incl i.c.)', 'steam- 72%', 'steam;retired - 2013', "respondent's sh.-st.", "respondent's sh-st", '40% share steam', 'resp share stm note3', 'mpc50% share steam', 'resp share st note 3', '\x02steam (1)', 'coal fired steam tur', 'steam- 64%', ], "combustion_turbine": [ 'combustion turbine', 'gt', 'gas turbine', 'gas turbine # 1', 'gas turbine', 'gas turbine (note 1)', 'gas turbines', 'simple cycle', 'combustion turbine', 'comb.turb.peak.units', 'gas turbine', 'combustion turbine', 'com turbine peaking', 'gas turbine peaking', 'comb turb peaking', 'combustine turbine', 'comb. 
turine', 'conbustion turbine', 'combustine turbine', 'gas turbine (leased)', 'combustion tubine', 'gas turb', 'gas turbine peaker', 'gtg/gas', 'simple cycle turbine', 'gas-turbine', 'gas turbine-simple', 'gas turbine - note 1', 'gas turbine #1', 'simple cycle', 'gasturbine', 'combustionturbine', 'gas turbine (2)', 'comb turb peak units', 'jet engine', 'jet powered turbine', '*gas turbine', 'gas turb.(see note5)', 'gas turb. (see note', 'combutsion turbine', 'combustion turbin', 'gas turbine-unit 2', 'gas - turbine', 'comb turbine peaking', 'gas expander turbine', 'jet turbine', 'gas turbin (lease', 'gas turbine (leased', 'gas turbine/int. cm', 'comb.turb-gas oper.', 'comb.turb.gas/oil op', 'comb.turb.oil oper.', 'jet', 'comb. turbine (a)', 'gas turb.(see notes)', 'gas turb(see notes)', 'comb. turb-gas oper', 'comb.turb.oil oper', 'gas turbin (leasd)', 'gas turbne/int comb', 'gas turbine (note1)', 'combution turbin', '* gas turbine', 'add to gas turbine', 'gas turbine (a)', 'gas turbinint comb', 'gas turbine (note 3)', 'resp share gas note3', 'gas trubine', '*gas turbine(note3)', 'gas turbine note 3,6', 'gas turbine note 4,6', 'gas turbine peakload', 'combusition turbine', 'gas turbine (lease)', 'comb. turb-gas oper.', 'combution turbine', 'combusion turbine', 'comb. turb. oil oper', 'combustion burbine', 'combustion and gas', 'comb. turb.', 'gas turbine (lease', 'gas turbine (leasd)', 'gas turbine/int comb', '*gas turbine(note 3)', 'gas turbine (see nos', 'i.c.e./gas turbine', 'gas turbine/intcomb', 'cumbustion turbine', 'gas turb, int. comb.', 'gas turb, diesel', 'gas turb, int. comb', 'i.c.e/gas turbine', 'diesel turbine', 'comubstion turbine', 'i.c.e. /gas turbine', 'i.c.e/ gas turbine', 'i.c.e./gas tubine', 'gas turbine; retired', ], "combined_cycle": [ 'Combined cycle', 'combined cycle', 'combined', 'gas & steam turbine', 'gas turb. & heat rec', 'combined cycle', 'com. cyc', 'com. cycle', 'gas turb-combined cy', 'combined cycle ctg', 'combined cycle - 40%', 'com cycle gas turb', 'combined cycle oper', 'gas turb/comb. cyc', 'combine cycle', 'cc', 'comb. cycle', 'gas turb-combined cy', 'steam and cc', 'steam cc', 'gas steam', 'ctg steam gas', 'steam comb cycle', 'gas/steam comb. cycl', 'steam (comb. cycle)' 'gas turbine/steam', 'steam & gas turbine', 'gas trb & heat rec', 'steam & combined ce', 'st/gas turb comb cyc', 'gas tur & comb cycl', 'combined cycle (a,b)', 'gas turbine/ steam', 'steam/gas turb.', 'steam & comb cycle', 'gas/steam comb cycle', 'comb cycle (a,b)', 'igcc', 'steam/gas turbine', 'gas turbine / steam', 'gas tur & comb cyc', 'comb cyc (a) (b)', 'comb cycle', 'comb cyc', 'combined turbine', 'combine cycle oper', 'comb cycle/steam tur', 'cc / gas turb', 'steam (comb. cycle)', 'steam & cc', 'gas turbine/steam', 'gas turb/cumbus cycl', 'gas turb/comb cycle', 'gasturb/comb cycle', 'gas turb/cumb. cyc', 'igcc/gas turbine', 'gas / steam', 'ctg/steam-gas', 'ctg/steam -gas', 'gas fired cc turbine', 'combinedcycle', 'comb cycle gas turb', 'combined cycle opern', 'comb. 
cycle gas turb', 'ngcc', ], "nuclear": [ 'nuclear', 'nuclear (3)', 'steam(nuclear)', 'nuclear(see note4)' 'nuclear steam', 'nuclear turbine', 'nuclear - steam', 'nuclear (a)(b)(c)', 'nuclear (b)(c)', '* nuclear', 'nuclear (b) (c)', 'nuclear (see notes)', 'steam (nuclear)', '* nuclear (note 2)', 'nuclear (note 2)', 'nuclear (see note 2)', 'nuclear(see note4)', 'nuclear steam', 'nuclear(see notes)', 'nuclear-steam', 'nuclear (see note 3)' ], "geothermal": ['steam - geothermal', 'steam_geothermal', 'geothermal'], "internal_combustion": [ 'ic', 'internal combustion', 'internal comb.', 'internl combustion' 'diesel turbine', 'int combust (note 1)', 'int. combust (note1)', 'int.combustine', 'comb. cyc', 'internal comb', 'diesel', 'diesel engine', 'internal combustion', 'int combust - note 1', 'int. combust - note1', 'internal comb recip', 'reciprocating engine', 'comb. turbine', 'internal combust.', 'int. combustion (1)', '*int combustion (1)', "*internal combust'n", 'internal', 'internal comb.', 'steam internal comb', 'combustion', 'int. combustion', 'int combust (note1)', 'int. combustine', 'internl combustion', '*int. combustion (1)', 'internal conbustion', ], "wind": [ 'wind', 'wind energy', 'wind turbine', 'wind - turbine', 'wind generation', 'wind turbin', ], "photovoltaic": ['solar photovoltaic', 'photovoltaic', 'solar', 'solar project'], "solar_thermal": ['solar thermal'], "unknown": [ '', 'n/a', 'see pgs 402.1-402.3', 'see pgs 403.1-403.9', "respondent's share", '--', '(see note 7)', 'other', 'not applicable', 'peach bottom', 'none.', 'fuel facilities', '0', 'not in service', 'none', 'common expenses', 'expenses common to', 'retired in 1981', 'retired in 1978', 'na', 'unit total (note3)', 'unit total (note2)', 'resp. share (note2)', 'resp. share (note8)', 'resp. share (note 9)', 'resp. share (note11)', 'resp. share (note4)', 'resp. share (note6)', 'conventional', 'expenses commom to', 'not in service in', 'unit total (note 3)', 'unit total (note 2)', 'resp. share (note 8)', 'resp. share (note 3)', 'resp. share note 11', 'resp. share (note 4)', 'resp. share (note 6)', '(see note 5)', 'resp. share (note 2)', 'package', '(left blank)', 'common', '0.0000', 'other generation', 'resp share (note 11)', 'retired', 'storage/pipelines', 'sold april 16, 1999', 'sold may 07, 1999', 'plants sold in 1999', 'gas', 'not applicable.', 'resp. share - note 2', 'resp. share - note 8', 'resp. share - note 9', 'resp share - note 11', 'resp. share - note 4', 'resp. share - note 6', 'plant retired- 2013', 'retired - 2013', 'resp share - note 5', 'resp. share - note 7', 'non-applicable', 'other generation plt', 'combined heat/power', 'oil' ], } """ A mapping from canonical plant kinds (keys) to the associated freeform strings (values) identified as being associated with that kind of plant in the FERC Form 1 raw data. There are many strings that weren't categorized, Solar and Solar Project were not classified as these do not indicate if they are solar thermal or photovoltaic. Variants on Steam (e.g. "steam 72" and "steam and gas") were classified based on additional research of the plants on the Internet. 
""" CONSTRUCTION_TYPE_STRINGS: Dict[str, List[str]] = { "outdoor": [ 'outdoor', 'outdoor boiler', 'full outdoor', 'outdoor boiler', 'outdoor boilers', 'outboilers', 'fuel outdoor', 'full outdoor', 'outdoors', 'outdoor', 'boiler outdoor& full', 'boiler outdoor&full', 'outdoor boiler& full', 'full -outdoor', 'outdoor steam', 'outdoor boiler', 'ob', 'outdoor automatic', 'outdoor repower', 'full outdoor boiler', 'fo', 'outdoor boiler & ful', 'full-outdoor', 'fuel outdoor', 'outoor', 'outdoor', 'outdoor boiler&full', 'boiler outdoor &full', 'outdoor boiler &full', 'boiler outdoor & ful', 'outdoor-boiler', 'outdoor - boiler', 'outdoor const.', '4 outdoor boilers', '3 outdoor boilers', 'full outdoor', 'full outdoors', 'full oudoors', 'outdoor (auto oper)', 'outside boiler', 'outdoor boiler&full', 'outdoor hrsg', 'outdoor hrsg', 'outdoor-steel encl.', 'boiler-outdr & full', 'con.& full outdoor', 'partial outdoor', 'outdoor (auto. oper)', 'outdoor (auto.oper)', 'outdoor construction', '1 outdoor boiler', '2 outdoor boilers', 'outdoor enclosure', '2 outoor boilers', 'boiler outdr.& full', 'boiler outdr. & full', 'ful outdoor', 'outdoor-steel enclos', 'outdoor (auto oper.)', 'con. & full outdoor', 'outdore', 'boiler & full outdor', 'full & outdr boilers', 'outodoor (auto oper)', 'outdoor steel encl.', 'full outoor', 'boiler & outdoor ful', 'otdr. blr. & f. otdr', 'f.otdr & otdr.blr.', 'oudoor (auto oper)', 'outdoor constructin', 'f. otdr. & otdr. blr', 'outdoor boiler & fue', 'outdoor boiler &fuel', ], "semioutdoor": [ 'more than 50% outdoo', 'more than 50% outdos', 'over 50% outdoor', 'over 50% outdoors', 'semi-outdoor', 'semi - outdoor', 'semi outdoor', 'semi-enclosed', 'semi-outdoor boiler', 'semi outdoor boiler', 'semi- outdoor', 'semi - outdoors', 'semi -outdoor' 'conven & semi-outdr', 'conv & semi-outdoor', 'conv & semi- outdoor', 'convent. semi-outdr', 'conv. semi outdoor', 'conv(u1)/semiod(u2)', 'conv u1/semi-od u2', 'conv-one blr-semi-od', 'convent semioutdoor', 'conv. u1/semi-od u2', 'conv - 1 blr semi od', 'conv. ui/semi-od u2', 'conv-1 blr semi-od', 'conven. semi-outdoor', 'conv semi-outdoor', 'u1-conv./u2-semi-od', 'u1-conv./u2-semi -od', 'convent. semi-outdoo', 'u1-conv. / u2-semi', 'conven & semi-outdr', 'semi -outdoor', 'outdr & conventnl', 'conven. full outdoor', 'conv. & outdoor blr', 'conv. & outdoor blr.', 'conv. & outdoor boil', 'conv. & outdr boiler', 'conv. & out. boiler', 'convntl,outdoor blr', 'outdoor & conv.', '2 conv., 1 out. boil', 'outdoor/conventional', 'conv. boiler outdoor', 'conv-one boiler-outd', 'conventional outdoor', 'conventional outdor', 'conv. outdoor boiler', 'conv.outdoor boiler', 'conventional outdr.', 'conven,outdoorboiler', 'conven full outdoor', 'conven,full outdoor', '1 out boil, 2 conv', 'conv. & full outdoor', 'conv. & outdr. boilr', 'conv outdoor boiler', 'convention. outdoor', 'conv. sem. outdoor', 'convntl, outdoor blr', 'conv & outdoor boil', 'conv & outdoor boil.', 'outdoor & conv', 'conv. broiler outdor', '1 out boilr, 2 conv', 'conv.& outdoor boil.', 'conven,outdr.boiler', 'conven,outdr boiler', 'outdoor & conventil', '1 out boilr 2 conv', 'conv & outdr. boilr', 'conven, full outdoor', 'conven full outdr.', 'conven, full outdr.', 'conv/outdoor boiler', "convnt'l outdr boilr", '1 out boil 2 conv', 'conv full outdoor', 'conven, outdr boiler', 'conventional/outdoor', 'conv&outdoor boiler', 'outdoor & convention', 'conv & outdoor boilr', 'conv & full outdoor', 'convntl. 
outdoor blr', 'conv - ob', "1conv'l/2odboilers", "2conv'l/1odboiler", 'conv-ob', 'conv.-ob', '1 conv/ 2odboilers', '2 conv /1 odboilers', 'conv- ob', 'conv -ob', 'con sem outdoor', 'cnvntl, outdr, boilr', 'less than 50% outdoo', 'under 50% outdoor', 'under 50% outdoors', '1cnvntnl/2odboilers', '2cnvntnl1/1odboiler', 'con & ob', 'combination (b)', 'indoor & outdoor', 'conven. blr. & full', 'conv. & otdr. blr.', 'combination', 'indoor and outdoor', 'conven boiler & full', "2conv'l/10dboiler", '4 indor/outdr boiler', '4 indr/outdr boilerr', '4 indr/outdr boiler', 'indoor & outdoof', ], "conventional": [ 'conventional', 'conventional', 'conventional boiler', 'conv-b', 'conventionall', 'convention', 'conventional', 'coventional', 'conven full boiler', 'c0nventional', 'conventtional', 'convential' 'underground', 'conventional bulb', 'conventrional', '*conventional', 'convential', 'convetional', 'conventioanl', 'conventioinal', 'conventaional', 'indoor construction', 'convenional', 'conventional steam', 'conventinal', 'convntional', 'conventionl', 'conventionsl', 'conventiional', 'convntl steam plants', 'indoor const.', 'full indoor', 'indoor', 'indoor automatic', 'indoor boiler', '(peak load) indoor', 'conventionl,indoor', 'conventionl, indoor', 'conventional, indoor', 'conventional;outdoor', 'conven./outdoor', 'conventional;semi-ou', 'comb. cycle indoor', '3 indoor boiler', '2 indoor boilers', '1 indoor boiler', '2 indoor boiler', '3 indoor boilers', 'fully contained', 'conv - b', 'conventional/boiler', 'cnventional', 'comb. cycle indooor', 'sonventional', 'ind enclosures', 'conentional', 'conventional - boilr', 'indoor boiler and st', ], "unknown": [ '', 'automatic operation', 'comb. turb. installn', 'comb. turb. instaln', 'com. turb. installn', 'n/a', 'for detailed info.', 'for detailed info', 'combined cycle', 'na', 'not applicable', 'gas', 'heated individually', 'metal enclosure', 'pressurized water', 'nuclear', 'jet engine', 'gas turbine', 'storage/pipelines', '0', 'during 1994', 'peaking - automatic', 'gas turbine/int. cm', '2 oil/gas turbines', 'wind', 'package', 'mobile', 'auto-operated', 'steam plants', 'other production', 'all nuclear plants', 'other power gen.', 'automatically operad', 'automatically operd', 'circ fluidized bed', 'jet turbine', 'gas turbne/int comb', 'automatically oper.', 'retired 1/1/95', 'during 1995', '1996. plant sold', 'reactivated 7/1/96', 'gas turbine/int comb', 'portable', 'head individually', 'automatic opertion', 'peaking-automatic', 'cycle', 'full order', 'circ. fluidized bed', 'gas turbine/intcomb', '0.0000', 'none', '2 oil / gas', 'block & steel', 'and 2000', 'comb.turb. instaln', 'automatic oper.', 'pakage', '---', 'n/a (ct)', 'comb turb instain', 'ind encloures', '2 oil /gas turbines', 'combustion turbine', '1970', 'gas/oil turbines', 'combined cycle steam', 'pwr', '2 oil/ gas', '2 oil / gas turbines', 'gas / oil turbines', 'no boiler', 'internal combustion', 'gasturbine no boiler', 'boiler', 'tower -10 unit facy', 'gas trubine', '4 gas/oil trubines', '2 oil/ 4 gas/oil tur', '5 gas/oil turbines', 'tower 16', '2 on 1 gas turbine', 'tower 23', 'tower -10 unit', 'tower - 101 unit', '3 on 1 gas turbine', 'tower - 10 units', 'tower - 165 units', 'wind turbine', 'fixed tilt pv', 'tracking pv', 'o', 'wind trubine', 'subcritical', 'sucritical', 'simple cycle', 'simple & reciprocat', 'solar', 'pre-fab power plant', 'prefab power plant', 'prefab. power plant', 'pump storage', 'underground', 'see page 402', 'conv. underground', 'conven. 
underground', 'conventional (a)', 'non-applicable', ], } """ A dictionary of construction types (keys) and lists of construction type strings associated with each type (values) from FERC Form 1. There are many strings that weren't categorized, including crosses between conventional and outdoor, PV, wind, combined cycle, and internal combustion. The lists are broken out into the two types specified in Form 1: conventional and outdoor. These lists are inclusive so that variants of conventional (e.g. "conventional full") and outdoor (e.g. "outdoor full" and "outdoor hrsg") are included. """ ############################################################################## # FERC TRANSFORM HELPER FUNCTIONS ############################################ ############################################################################## def unpack_table(ferc1_df, table_name, data_cols, data_rows): """ Normalize a row-and-column based FERC Form 1 table. Pulls the named database table from the FERC Form 1 DB and uses the corresponding ferc1_row_map to unpack the row_number coded data. Args: ferc1_df (pandas.DataFrame): Raw FERC Form 1 DataFrame from the DB. table_name (str): Original name of the FERC Form 1 DB table. data_cols (list): List of strings corresponding to the original FERC Form 1 database table column labels -- these are the columns of data that we are extracting (it can be a subset of the columns which are present in the original database). data_rows (list): List of row_names to extract, as defined in the FERC 1 row maps. Set to slice(None) if you want all rows. Returns: pandas.DataFrame """ # Read in the corresponding row map: row_map = ( pd.read_csv( importlib.resources.open_text( "pudl.package_data.ferc1.row_maps", f"{table_name}.csv"), index_col=0, comment="#") .copy().transpose() .rename_axis(index="year_index", columns=None) ) row_map.index = row_map.index.astype(int) # For each year, rename row numbers to variable names based on row_map. rename_dict = {} out_df = pd.DataFrame() for year in row_map.index: rename_dict = {v: k for k, v in dict(row_map.loc[year, :]).items()} _ = rename_dict.pop(-1, None) df = ferc1_df.loc[ferc1_df.report_year == year].copy() df.loc[:, "row_name"] = df.loc[:, "row_number"].replace(rename_dict) # The concatenate according to row_name out_df = pd.concat([out_df, df], axis="index") # Is this list of index columns universal? Or should they be an argument? idx_cols = [ "respondent_id", "report_year", "report_prd", "spplmnt_num", "row_name" ] logger.info( f"{len(out_df[out_df.duplicated(idx_cols)])/len(out_df):.4%} " f"of unpacked records were duplicates, and discarded." ) # Index the dataframe based on the list of index_cols # Unstack the dataframe based on variable names out_df = ( out_df.loc[:, idx_cols + data_cols] # These lost records should be minimal. If not, something's wrong. .drop_duplicates(subset=idx_cols) .set_index(idx_cols) .unstack("row_name") .loc[:, (slice(None), data_rows)] ) return out_df def cols_to_cats(df, cat_name, col_cats): """ Turn top-level MultiIndex columns into a categorial column. In some cases FERC Form 1 data comes with many different types of related values interleaved in the same table -- e.g. current year and previous year income -- this can result in DataFrames that are hundreds of columns wide, which is unwieldy. This function takes those top level MultiIndex labels and turns them into categories in a single column, which can be used to select a particular type of report. 
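For example (illustrative), the plant in service transform below hands this function a frame whose top-level column labels are "begin_yr_bal", "yr_end_bal", and so on, and collapses them into a single "amount_type" column with values like "starting_balance" and "ending_balance".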
Args: df (pandas.DataFrame): the dataframe to be simplified. cat_name (str): the label of the column to be created indicating what MultiIndex label the values came from. col_cats (dict): a dictionary with top level MultiIndex labels as keys, and the category to which they should be mapped as values. Returns: pandas.DataFrame: A re-shaped/re-labeled dataframe with one fewer levels of MultiIndex in the columns, and an additional column containing the assigned labels. """ out_df = pd.DataFrame() for col, cat in col_cats.items(): logger.info(f"Col: {col}, Cat: {cat}") tmp_df = df.loc[:, col].copy().dropna(how='all') tmp_df.loc[:, cat_name] = cat out_df = pd.concat([out_df, tmp_df]) return out_df.reset_index() def _clean_cols(df, table_name): """Adds a FERC record ID and drop FERC columns not to be loaded into PUDL. It is often useful to be able to tell exactly which record in the FERC Form 1 database a given record within the PUDL database came from. Within each FERC Form 1 table, each record is supposed to be uniquely identified by the combination of: report_year, report_prd, respondent_id, spplmnt_num, row_number. So this function takes a dataframe, checks to make sure it contains each of those columns and that none of them are NULL, and adds a new column to the dataframe containing a string of the format: {table_name}_{report_year}_{report_prd}_{respondent_id}_{spplmnt_num}_{row_number} In some PUDL FERC Form 1 tables (e.g. plant_in_service_ferc1) a single row is re-organized into several new records in order to normalize the data and ensure it is stored in a "tidy" format. In such cases each of the resulting PUDL records will have the same ``record_id``. Otherwise, the ``record_id`` is expected to be unique within each FERC Form 1 table. However there are a handful of cases in which this uniqueness constraint is violated due to data reporting issues in FERC Form 1. In addition to those primary key columns, there are some columns which are not meaningful or useful in the context of PUDL, but which show up in virtually every FERC table, and this function drops them if they are present. These columns include: row_prvlg, row_seq, item, record_number (a temporary column used in plants_small) and all the footnote columns, which end in "_f". Args: df (pandas.DataFrame): The DataFrame in which the function looks for columns for the unique identification of FERC records, and ensures that those columns are not NULL. table_name (str): The name of the table that we are cleaning. Returns: pandas.DataFrame: The same DataFrame with a column appended containing a string of the format {table_name}_{report_year}_{report_prd}_{respondent_id}_{spplmnt_num}_{row_number} Raises: AssertionError: If the table input contains NULL columns """ # Make sure that *all* of these columns exist in the proffered table: for field in ['report_year', 'report_prd', 'respondent_id', 'spplmnt_num', 'row_number']: if field in df.columns: if df[field].isnull().any(): raise AssertionError( f"Null field {field} found in ferc1 table {table_name}." ) # Create a unique inter-year FERC table record ID: df['record_id'] = ( table_name + '_' + df.report_year.astype(str) + '_' + df.report_prd.astype(str) + '_' + df.respondent_id.astype(str) + '_' + df.spplmnt_num.astype(str) ) # Because of the way we are re-organizing columns and rows to create well # normalized tables, there may or may not be a row number available. 
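# For example (hypothetical values), a record from the f1_steam table could end
# up with record_id == "f1_steam_2018_12_44_0_2" once the row number is appended
# below; if there is no row_number column the trailing "_2" is simply omitted.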
if "row_number" in df.columns: df["record_id"] = df["record_id"] + "_" + df.row_number.astype(str) # Check to make sure that the generated record_id is unique... since # that's kind of the whole point. There are couple of genuine bad # records here that are taken care of in the transform step, so just # print a warning. n_dupes = df.record_id.duplicated().values.sum() if n_dupes: dupe_ids = df.record_id[df.record_id.duplicated()].values logger.warning( f"{n_dupes} duplicate record_id values found " f"in pre-transform table {table_name}: {dupe_ids}." ) # Drop any _f columns... since we're not using the FERC Footnotes... # Drop columns and don't complain about it if they don't exist: no_f = [c for c in df.columns if not re.match(".*_f$", c)] df = ( df.loc[:, no_f] .drop(['spplmnt_num', 'row_number', 'row_prvlg', 'row_seq', 'report_prd', 'item', 'record_number'], errors='ignore', axis="columns") .rename(columns={"respondent_id": "utility_id_ferc1"}) ) return df def _multiplicative_error_correction(tofix, mask, minval, maxval, mults): """Corrects data entry errors where data being multiplied by a factor. In many cases we know that a particular column in the database should have a value in a particular rage (e.g. the heat content of a ton of coal is a well defined physical quantity -- it can be 15 mmBTU/ton or 22 mmBTU/ton, but it can't be 1 mmBTU/ton or 100 mmBTU/ton). Sometimes these fields are reported in the wrong units (e.g. kWh of electricity generated rather than MWh) resulting in several distributions that have a similar shape showing up at different ranges of value within the data. This function takes a one dimensional data series, a description of a valid range for the values, and a list of factors by which we expect to see some of the data multiplied due to unit errors. Data found in these "ghost" distributions are multiplied by the appropriate factor to bring them into the expected range. Data values which are not found in one of the acceptable multiplicative ranges are set to NA. Args: tofix (pandas.Series): A 1-dimensional data series containing the values to be fixed. mask (pandas.Series): A 1-dimensional masking array of True/False values, which will be used to select a subset of the tofix series onto which we will apply the multiplicative fixes. min (float): the minimum realistic value for the data series. max (float): the maximum realistic value for the data series. mults (list of floats): values by which "real" data may have been multiplied due to common data entry errors. These values both show us where to look in the full data series to find recoverable data, and also tell us by what factor those values need to be multiplied to bring them back into the reasonable range. Returns: fixed (pandas.Series): a data series of the same length as the input, but with the transformed values. """ # Grab the subset of the input series we are going to work on: records_to_fix = tofix[mask] # Drop those records from our output series fixed = tofix.drop(records_to_fix.index) # Iterate over the multipliers, applying fixes to outlying populations for mult in mults: records_to_fix = records_to_fix.apply(lambda x: x * mult if x > minval / mult and x < maxval / mult else x) # Set any record that wasn't inside one of our identified populations to # NA -- we are saying that these are true outliers, which can't be part # of the population of values we are examining. 
records_to_fix = records_to_fix.apply(lambda x: np.nan if x < minval or x > maxval else x) # Add our fixed records back to the complete data series and return it fixed = pd.concat([fixed, records_to_fix]) return fixed ############################################################################## # DATABASE TABLE SPECIFIC PROCEDURES ########################################## ############################################################################## def plants_steam(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 plant_steam data for loading into PUDL Database. This includes converting to our preferred units of MWh and MW, as well as standardizing the strings describing the kind of plant and construction. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: of transformed dataframes, including the newly transformed plants_steam_ferc1 dataframe. """ ferc1_steam_df = ( ferc1_raw_dfs['plants_steam_ferc1']. pipe(_plants_steam_clean). pipe(_plants_steam_assign_plant_ids, ferc1_transformed_dfs['fuel_ferc1']) ) plants_steam_validate_ids(ferc1_steam_df) ferc1_steam_df = ferc1_steam_df.replace( {'construction_type': 'unknown', 'plant_type': 'unknown'}, pd.NA) ferc1_transformed_dfs['plants_steam_ferc1'] = ferc1_steam_df return ferc1_transformed_dfs def _plants_steam_clean(ferc1_steam_df): ferc1_steam_df = ( ferc1_steam_df.rename(columns={ "plant_name": "plant_name_ferc1", "yr_const": 'construction_year', "plant_kind": 'plant_type', "type_const": 'construction_type', "asset_retire_cost": 'asset_retirement_cost', "yr_installed": 'installation_year', "tot_capacity": 'capacity_mw', "peak_demand": 'peak_demand_mw', "plant_hours": 'plant_hours_connected_while_generating', "plnt_capability": 'plant_capability_mw', "when_limited": 'water_limited_capacity_mw', "when_not_limited": 'not_water_limited_capacity_mw', "avg_num_of_emp": 'avg_num_employees', "net_generation": 'net_generation_kwh', "cost_land": 'capex_land', "cost_structure": 'capex_structures', "cost_equipment": 'capex_equipment', "cost_of_plant_to": 'capex_total', "cost_per_kw": 'capex_per_kw', "expns_operations": 'opex_operations', "expns_fuel": 'opex_fuel', "expns_coolants": 'opex_coolants', "expns_steam": 'opex_steam', "expns_steam_othr": 'opex_steam_other', "expns_transfer": 'opex_transfer', "expns_electric": 'opex_electric', "expns_misc_power": 'opex_misc_power', "expns_rents": 'opex_rents', "expns_allowances": 'opex_allowances', "expns_engnr": 'opex_engineering', "expns_structures": 'opex_structures', "expns_boiler": 'opex_boiler', "expns_plants": 'opex_plants', "expns_misc_steam": 'opex_misc_steam', "tot_prdctn_expns": 'opex_production_total', "expns_kwh": 'opex_per_kwh'}) .pipe(_clean_cols, "f1_steam") .pipe(pudl.helpers.simplify_strings, ['plant_name_ferc1']) .pipe(pudl.helpers.cleanstrings, ['construction_type', 'plant_type'], [CONSTRUCTION_TYPE_STRINGS, PLANT_KIND_STRINGS], unmapped=pd.NA) .pipe(pudl.helpers.oob_to_nan, cols=["construction_year", "installation_year"], lb=1850, ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1) .assign( capex_per_mw=lambda x: 1000.0 * x.capex_per_kw, opex_per_mwh=lambda x: 1000.0 * x.opex_per_kwh, net_generation_mwh=lambda x: x.net_generation_kwh / 1000.0, ) .drop(columns=["capex_per_kw", "opex_per_kwh", "net_generation_kwh"]) ) for col in ['construction_type', 'plant_type']: if 
ferc1_steam_df[col].isnull().any(): raise AssertionError( f"NA values found in {col} column during FERC 1 steam clean, add string to dictionary for this column" ) return ferc1_steam_df def _plants_steam_assign_plant_ids(ferc1_steam_df, ferc1_fuel_df): """Assign IDs to the large steam plants.""" ########################################################################### # FERC PLANT ID ASSIGNMENT ########################################################################### # Now we need to assign IDs to the large steam plants, since FERC doesn't # do this for us. logger.info("Identifying distinct large FERC plants for ID assignment.") # scikit-learn still doesn't deal well with NA values (this will be fixed # eventually) We need to massage the type and missing data for the # Classifier to work. ferc1_steam_df = pudl.helpers.fix_int_na( ferc1_steam_df, columns=['construction_year']) # Grab fuel consumption proportions for use in assigning plant IDs: fuel_fractions = fuel_by_plant_ferc1(ferc1_fuel_df) ffc = list(fuel_fractions.filter(regex='.*_fraction_mmbtu$').columns) ferc1_steam_df = ( ferc1_steam_df.merge( fuel_fractions[ ['utility_id_ferc1', 'plant_name_ferc1', 'report_year'] + ffc], on=['utility_id_ferc1', 'plant_name_ferc1', 'report_year'], how='left' ) ) # We need to fill the null values for these numerical feature vectors with # zeros. not ideal, but the model requires dealing with nulls null_to_zero = ffc + ['capacity_mw'] ferc1_steam_df[null_to_zero] = ( ferc1_steam_df[null_to_zero].fillna(value=0.0)) # Train the classifier using DEFAULT weights, parameters not listed here. ferc1_clf = pudl.transform.ferc1.make_ferc1_clf(ferc1_steam_df) ferc1_clf = ferc1_clf.fit_transform(ferc1_steam_df) # Use the classifier to generate groupings of similar records: record_groups = ferc1_clf.predict(ferc1_steam_df.record_id) n_tot = len(ferc1_steam_df) n_grp = len(record_groups) pct_grp = n_grp / n_tot logger.info( f"Successfully associated {n_grp} of {n_tot} ({pct_grp:.2%}) " f"FERC Form 1 plant records with multi-year plant entities.") record_groups.columns = record_groups.columns.astype(str) cols = record_groups.columns record_groups = record_groups.reset_index() # Now we are going to create a graph (network) that describes all of the # binary relationships between a seed_id and the record_ids that it has # been associated with in any other year. Each connected component of that # graph is a ferc plant time series / plant_id logger.info("Assigning IDs to multi-year FERC plant entities.") edges_df = pd.DataFrame(columns=['source', 'target']) for col in cols: new_edges = record_groups[['seed_id', col]] new_edges = new_edges.rename( {'seed_id': 'source', col: 'target'}, axis=1) edges_df = pd.concat([edges_df, new_edges], sort=True) # Drop any records where there's no target ID (no match in a year) edges_df = edges_df[edges_df.target != ''] # We still have to deal with the orphaned records -- any record which # wasn't place in a time series but is still valid should be included as # its own independent "plant" for completeness, and use in aggregate # analysis. orphan_record_ids = np.setdiff1d(ferc1_steam_df.record_id.unique(), record_groups.values.flatten()) logger.info( f"Identified {len(orphan_record_ids)} orphaned FERC plant records. 
" f"Adding orphans to list of plant entities.") orphan_df = pd.DataFrame({'source': orphan_record_ids, 'target': orphan_record_ids}) edges_df = pd.concat([edges_df, orphan_df], sort=True) # Use the data frame we've compiled to create a graph G = nx.from_pandas_edgelist(edges_df, # noqa: N806 source='source', target='target') # Find the connected components of the graph ferc1_plants = (G.subgraph(c) for c in nx.connected_components(G)) # Now we'll iterate through the connected components and assign each of # them a FERC Plant ID, and pull the results back out into a dataframe: plants_w_ids = [] for plant_id_ferc1, plant in enumerate(ferc1_plants): nx.set_edge_attributes(plant, plant_id_ferc1 + 1, name='plant_id_ferc1') new_plant_df = nx.to_pandas_edgelist(plant) plants_w_ids.append(new_plant_df) plants_w_ids = pd.concat(plants_w_ids) logger.info( f"Successfully Identified {plant_id_ferc1+1-len(orphan_record_ids)} " f"multi-year plant entities.") # Set the construction year back to numeric because it is. ferc1_steam_df['construction_year'] = pd.to_numeric( ferc1_steam_df['construction_year'], errors='coerce') # We don't actually want to save the fuel fractions in this table... they # were only here to help us match up the plants. ferc1_steam_df = ferc1_steam_df.drop(ffc, axis=1) # Now we need a list of all the record IDs, with their associated # FERC 1 plant IDs. However, the source-target listing isn't # guaranteed to list every one of the nodes in either list, so we # need to compile them together to ensure that we get every single sources = ( plants_w_ids. drop('target', axis=1). drop_duplicates(). rename({'source': 'record_id'}, axis=1) ) targets = ( plants_w_ids. drop('source', axis=1). drop_duplicates(). rename({'target': 'record_id'}, axis=1) ) plants_w_ids = ( pd.concat([sources, targets]). drop_duplicates(). sort_values(['plant_id_ferc1', 'record_id']) ) steam_rids = ferc1_steam_df.record_id.values pwids_rids = plants_w_ids.record_id.values missing_ids = [rid for rid in steam_rids if rid not in pwids_rids] if missing_ids: raise AssertionError( f"Uh oh, we lost {abs(len(steam_rids)-len(pwids_rids))} FERC " f"steam plant record IDs: {missing_ids}" ) ferc1_steam_df = pd.merge(ferc1_steam_df, plants_w_ids, on='record_id') return ferc1_steam_df def plants_steam_validate_ids(ferc1_steam_df): """Tests that plant_id_ferc1 times series includes one record per year. Args: ferc1_steam_df (pandas.DataFrame): A DataFrame of the data from the FERC 1 Steam table. Returns: None """ ########################################################################## # FERC PLANT ID ERROR CHECKING STUFF ########################################################################## # Test to make sure that we don't have any plant_id_ferc1 time series # which include more than one record from a given year. Warn the user # if we find such cases (which... we do, as of writing) year_dupes = ( ferc1_steam_df. groupby(['plant_id_ferc1', 'report_year'])['utility_id_ferc1']. count(). reset_index(). rename(columns={'utility_id_ferc1': 'year_dupes'}). query('year_dupes>1') ) if len(year_dupes) > 0: for dupe in year_dupes.itertuples(): logger.error( f"Found report_year={dupe.report_year} " f"{dupe.year_dupes} times in " f"plant_id_ferc1={dupe.plant_id_ferc1}" ) else: logger.info( "No duplicate years found in any plant_id_ferc1. Hooray!" ) def fuel(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 fuel data for loading into PUDL Database. 
This process includes converting some columns to be in terms of our preferred units, like MWh and mmbtu instead of kWh and btu. Plant names are also standardized (stripped & lower). Fuel and fuel unit strings are also standardized using our cleanstrings() function and string cleaning dictionaries found above (FUEL_STRINGS, etc.) Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: The dictionary of transformed dataframes. """ # grab table from dictionary of dfs, clean it up a bit fuel_ferc1_df = ( _clean_cols(ferc1_raw_dfs['fuel_ferc1'], 'f1_fuel'). # Standardize plant_name capitalization and remove leading/trailing # white space -- necesary b/c plant_name is part of many foreign keys. pipe(pudl.helpers.simplify_strings, ['plant_name']). # Take the messy free-form fuel & fuel_unit fields, and do our best to # map them to some canonical categories... this is necessarily # imperfect: pipe(pudl.helpers.cleanstrings, ['fuel', 'fuel_unit'], [FUEL_STRINGS, FUEL_UNIT_STRINGS], unmapped=pd.NA). # Fuel cost per kWh is a per-unit value that doesn't make sense to # report for a single fuel that may be only a small part of the fuel # consumed. "fuel generaton" is heat rate, but as it's based only on # the heat content of a given fuel which may only be a small portion of # the overall fuel # consumption, it doesn't make any sense here. Drop # it. drop(['fuel_cost_kwh', 'fuel_generaton'], axis=1). # Convert from BTU/unit of fuel to 1e6 BTU/unit. assign(fuel_avg_mmbtu_per_unit=lambda x: x.fuel_avg_heat / 1e6). drop('fuel_avg_heat', axis=1). # Rename the columns to match our DB definitions rename(columns={ # FERC 1 DB Name PUDL DB Name "plant_name": "plant_name_ferc1", 'fuel': 'fuel_type_code_pudl', 'fuel_unit': 'fuel_units', 'fuel_avg_mmbtu_per_unit': 'fuel_mmbtu_per_unit', 'fuel_quantity': 'fuel_consumed_units', 'fuel_cost_burned': 'fuel_cost_per_unit_burned', 'fuel_cost_delvd': 'fuel_cost_per_unit_delivered', 'fuel_cost_btu': 'fuel_cost_per_mmbtu'}) ) if fuel_ferc1_df['fuel_units'].isnull().any(): raise AssertionError( "NA values found in fuel_units column during FERC 1 fuel clean, add string to dictionary" ) ######################################################################### # CORRECT DATA ENTRY ERRORS ############################################# ######################################################################### coal_mask = fuel_ferc1_df['fuel_type_code_pudl'] == 'coal' gas_mask = fuel_ferc1_df['fuel_type_code_pudl'] == 'gas' oil_mask = fuel_ferc1_df['fuel_type_code_pudl'] == 'oil' corrections = [ # mult = 2000: reported in units of lbs instead of short tons # mult = 1e6: reported BTUs instead of mmBTUs # minval and maxval of 10 and 29 mmBTUs are the range of values # specified by EIA 923 instructions at: # https://www.eia.gov/survey/form/eia_923/instructions.pdf ['fuel_mmbtu_per_unit', coal_mask, 10.0, 29.0, (2e3, 1e6)], # mult = 1e-2: reported cents/mmBTU instead of USD/mmBTU # minval and maxval of .5 and 7.5 dollars per mmBTUs are the # end points of the primary distribution of EIA 923 fuel receipts # and cost per mmBTU data weighted by quantity delivered ['fuel_cost_per_mmbtu', coal_mask, 0.5, 7.5, (1e-2, )], # mult = 1e3: reported fuel quantity in cubic feet, not mcf # mult = 1e6: reported fuel quantity in BTU, not mmBTU # minval and maxval of .8 and 1.2 mmBTUs are the range of values # specified by EIA 923 
instructions ['fuel_mmbtu_per_unit', gas_mask, 0.8, 1.2, (1e3, 1e6)], # mult = 1e-2: reported in cents/mmBTU instead of USD/mmBTU # minval and maxval of 1 and 35 dollars per mmBTUs are the # end points of the primary distribution of EIA 923 fuel receipts # and cost per mmBTU data weighted by quantity delivered ['fuel_cost_per_mmbtu', gas_mask, 1, 35, (1e-2, )], # mult = 42: reported fuel quantity in gallons, not barrels # mult = 1e6: reported fuel quantity in BTU, not mmBTU # minval and maxval of 3 and 6.9 mmBTUs are the range of values # specified by EIA 923 instructions ['fuel_mmbtu_per_unit', oil_mask, 3, 6.9, (42, )], # mult = 1e-2: reported in cents/mmBTU instead of USD/mmBTU # minval and maxval of 5 and 33 dollars per mmBTUs are the # end points of the primary distribution of EIA 923 fuel receipts # and cost per mmBTU data weighted by quantity delivered ['fuel_cost_per_mmbtu', oil_mask, 5, 33, (1e-2, )] ] for (coltofix, mask, minval, maxval, mults) in corrections: fuel_ferc1_df[coltofix] = \ _multiplicative_error_correction(fuel_ferc1_df[coltofix], mask, minval, maxval, mults) ######################################################################### # REMOVE BAD DATA ####################################################### ######################################################################### # Drop any records that are missing data. This is a blunt instrument, to # be sure. In some cases we lose data here, because some utilities have # (for example) a "Total" line w/ only fuel_mmbtu_per_kwh on it. Grr. fuel_ferc1_df.dropna(inplace=True) # Replace "unkown" fuel unit with NAs - this comes after we drop missing data with NAs fuel_ferc1_df = fuel_ferc1_df.replace({'fuel_units': 'unknown'}, pd.NA) ferc1_transformed_dfs['fuel_ferc1'] = fuel_ferc1_df return ferc1_transformed_dfs def plants_small(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 plant_small data for loading into PUDL Database. This FERC Form 1 table contains information about a large number of small plants, including many small hydroelectric and other renewable generation facilities. Unfortunately the data is not well standardized, and so the plants have been categorized manually, with the results of that categorization stored in an Excel spreadsheet. This function reads in the plant type data from the spreadsheet and merges it with the rest of the information from the FERC DB based on record number, FERC respondent ID, and report year. When possible the FERC license number for small hydro plants is also manually extracted from the data. This categorization will need to be renewed with each additional year of FERC data we pull in. As of v0.1 the small plants have been categorized for 2004-2015. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: The dictionary of transformed dataframes. """ # grab table from dictionary of dfs ferc1_small_df = ferc1_raw_dfs['plants_small_ferc1'] # Standardize plant_name_raw capitalization and remove leading/trailing # white space -- necesary b/c plant_name_raw is part of many foreign keys. ferc1_small_df = pudl.helpers.simplify_strings( ferc1_small_df, ['plant_name', 'kind_of_fuel'] ) # Force the construction and installation years to be numeric values, and # set them to NA if they can't be converted. 
(table has some junk values) ferc1_small_df = pudl.helpers.oob_to_nan( ferc1_small_df, cols=["yr_constructed"], lb=1850, ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1) # Convert from cents per mmbtu to dollars per mmbtu to be consistent # with the f1_fuel table data. Also, let's use a clearer name. ferc1_small_df['fuel_cost_per_mmbtu'] = ferc1_small_df['fuel_cost'] / 100.0 ferc1_small_df.drop('fuel_cost', axis=1, inplace=True) # Create a single "record number" for the individual lines in the FERC # Form 1 that report different small plants, so that we can more easily # tell whether they are adjacent to each other in the reporting. ferc1_small_df['record_number'] = 46 * ferc1_small_df['spplmnt_num'] + \ ferc1_small_df['row_number'] # Unforunately the plant types were not able to be parsed automatically # in this table. It's been done manually for 2004-2015, and the results # get merged in in the following section. small_types_file = importlib.resources.open_binary( 'pudl.package_data.ferc1', 'small_plants_2004-2016.xlsx') small_types_df = pd.read_excel(small_types_file) # Only rows with plant_type set will give us novel information. small_types_df.dropna(subset=['plant_type', ], inplace=True) # We only need this small subset of the columns to extract the plant type. small_types_df = small_types_df[['report_year', 'respondent_id', 'record_number', 'plant_name_clean', 'plant_type', 'ferc_license']] # Munge the two dataframes together, keeping everything from the # frame we pulled out of the FERC1 DB, and supplementing it with the # plant_name, plant_type, and ferc_license fields from our hand # made file. ferc1_small_df = pd.merge(ferc1_small_df, small_types_df, how='left', on=['report_year', 'respondent_id', 'record_number']) # Remove extraneous columns and add a record ID ferc1_small_df = _clean_cols(ferc1_small_df, 'f1_gnrt_plant') # Standardize plant_name capitalization and remove leading/trailing white # space, so that plant_name matches formatting of plant_name_raw ferc1_small_df = pudl.helpers.simplify_strings( ferc1_small_df, ['plant_name_clean']) # in order to create one complete column of plant names, we have to use the # cleaned plant names when available and the orignial plant names when the # cleaned version is not available, but the strings first need cleaning ferc1_small_df['plant_name_clean'] = ferc1_small_df['plant_name_clean'].fillna( value="") ferc1_small_df['plant_name_clean'] = ferc1_small_df.apply( lambda row: row['plant_name'] if (row['plant_name_clean'] == "") else row['plant_name_clean'], axis=1) # now we don't need the uncleaned version anymore # ferc1_small_df.drop(['plant_name'], axis=1, inplace=True) ferc1_small_df.rename(columns={ # FERC 1 DB Name PUDL DB Name 'plant_name': 'plant_name_ferc1', 'ferc_license': 'ferc_license_id', 'yr_constructed': 'construction_year', 'capacity_rating': 'capacity_mw', 'net_demand': 'peak_demand_mw', 'net_generation': 'net_generation_mwh', 'plant_cost': 'total_cost_of_plant', 'plant_cost_mw': 'capex_per_mw', 'operation': 'opex_total', 'expns_fuel': 'opex_fuel', 'expns_maint': 'opex_maintenance', 'kind_of_fuel': 'fuel_type', 'fuel_cost': 'fuel_cost_per_mmbtu'}, inplace=True) ferc1_transformed_dfs['plants_small_ferc1'] = ferc1_small_df return ferc1_transformed_dfs def plants_hydro(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 plant_hydro data for loading into PUDL Database. Standardizes plant names (stripping whitespace and Using Title Case). 
Also converts into our preferred units of MW and MWh. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: The dictionary of transformed dataframes. """ # grab table from dictionary of dfs ferc1_hydro_df = ( _clean_cols(ferc1_raw_dfs['plants_hydro_ferc1'], 'f1_hydro') # Standardize plant_name capitalization and remove leading/trailing # white space -- necesary b/c plant_name is part of many foreign keys. .pipe(pudl.helpers.simplify_strings, ['plant_name']) .pipe(pudl.helpers.cleanstrings, ['plant_const'], [CONSTRUCTION_TYPE_STRINGS], unmapped=pd.NA) .assign( # Converting kWh to MWh net_generation_mwh=lambda x: x.net_generation / 1000.0, # Converting cost per kW installed to cost per MW installed: cost_per_mw=lambda x: x.cost_per_kw * 1000.0, # Converting kWh to MWh expns_per_mwh=lambda x: x.expns_kwh * 1000.0) .pipe(pudl.helpers.oob_to_nan, cols=["yr_const", "yr_installed"], lb=1850, ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1) .drop(columns=['net_generation', 'cost_per_kw', 'expns_kwh']) .rename(columns={ # FERC1 DB PUDL DB "plant_name": "plant_name_ferc1", 'project_no': 'project_num', 'yr_const': 'construction_year', 'plant_kind': 'plant_type', 'plant_const': 'construction_type', 'yr_installed': 'installation_year', 'tot_capacity': 'capacity_mw', 'peak_demand': 'peak_demand_mw', 'plant_hours': 'plant_hours_connected_while_generating', 'favorable_cond': 'net_capacity_favorable_conditions_mw', 'adverse_cond': 'net_capacity_adverse_conditions_mw', 'avg_num_of_emp': 'avg_num_employees', 'cost_of_land': 'capex_land', 'cost_structure': 'capex_structures', 'cost_facilities': 'capex_facilities', 'cost_equipment': 'capex_equipment', 'cost_roads': 'capex_roads', 'cost_plant_total': 'capex_total', 'cost_per_mw': 'capex_per_mw', 'expns_operations': 'opex_operations', 'expns_water_pwr': 'opex_water_for_power', 'expns_hydraulic': 'opex_hydraulic', 'expns_electric': 'opex_electric', 'expns_generation': 'opex_generation_misc', 'expns_rents': 'opex_rents', 'expns_engineering': 'opex_engineering', 'expns_structures': 'opex_structures', 'expns_dams': 'opex_dams', 'expns_plant': 'opex_plant', 'expns_misc_plant': 'opex_misc_plant', 'expns_per_mwh': 'opex_per_mwh', 'expns_engnr': 'opex_engineering', 'expns_total': 'opex_total', 'asset_retire_cost': 'asset_retirement_cost', '': '', }) .drop_duplicates( subset=["report_year", "utility_id_ferc1", "plant_name_ferc1", "capacity_mw"], keep=False) ) if ferc1_hydro_df['construction_type'].isnull().any(): raise AssertionError( "NA values found in construction_type column during FERC1 hydro clean, add string to CONSTRUCTION_TYPE_STRINGS" ) ferc1_hydro_df = ferc1_hydro_df.replace({'construction_type': 'unknown'}, pd.NA) ferc1_transformed_dfs['plants_hydro_ferc1'] = ferc1_hydro_df return ferc1_transformed_dfs def plants_pumped_storage(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 pumped storage data for loading into PUDL. Standardizes plant names (stripping whitespace and Using Title Case). Also converts into our preferred units of MW and MWh. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: The dictionary of transformed dataframes. 
""" # grab table from dictionary of dfs ferc1_pump_df = ( _clean_cols( ferc1_raw_dfs['plants_pumped_storage_ferc1'], 'f1_pumped_storage') # Standardize plant_name capitalization and remove leading/trailing # white space -- necesary b/c plant_name is part of many foreign keys. .pipe(pudl.helpers.simplify_strings, ['plant_name']) # Clean up the messy plant construction type column: .pipe(pudl.helpers.cleanstrings, ['plant_kind'], [CONSTRUCTION_TYPE_STRINGS], unmapped=pd.NA) .assign( # Converting from kW/kWh to MW/MWh net_generation_mwh=lambda x: x.net_generation / 1000.0, energy_used_for_pumping_mwh=lambda x: x.energy_used / 1000.0, net_load_mwh=lambda x: x.net_load / 1000.0, cost_per_mw=lambda x: x.cost_per_kw * 1000.0, expns_per_mwh=lambda x: x.expns_kwh * 1000.0) .pipe(pudl.helpers.oob_to_nan, cols=["yr_const", "yr_installed"], lb=1850, ub=max(DataSource.from_id("ferc1").working_partitions["years"]) + 1) .drop(columns=['net_generation', 'energy_used', 'net_load', 'cost_per_kw', 'expns_kwh']) .rename(columns={ # FERC1 DB PUDL DB "plant_name": "plant_name_ferc1", 'project_number': 'project_num', 'tot_capacity': 'capacity_mw', 'project_no': 'project_num', 'plant_kind': 'construction_type', 'peak_demand': 'peak_demand_mw', 'yr_const': 'construction_year', 'yr_installed': 'installation_year', 'plant_hours': 'plant_hours_connected_while_generating', 'plant_capability': 'plant_capability_mw', 'avg_num_of_emp': 'avg_num_employees', 'cost_wheels': 'capex_wheels_turbines_generators', 'cost_land': 'capex_land', 'cost_structures': 'capex_structures', 'cost_facilties': 'capex_facilities', 'cost_wheels_turbines_generators': 'capex_wheels_turbines_generators', 'cost_electric': 'capex_equipment_electric', 'cost_misc_eqpmnt': 'capex_equipment_misc', 'cost_roads': 'capex_roads', 'asset_retire_cost': 'asset_retirement_cost', 'cost_of_plant': 'capex_total', 'cost_per_mw': 'capex_per_mw', 'expns_operations': 'opex_operations', 'expns_water_pwr': 'opex_water_for_power', 'expns_pump_strg': 'opex_pumped_storage', 'expns_electric': 'opex_electric', 'expns_misc_power': 'opex_generation_misc', 'expns_rents': 'opex_rents', 'expns_engneering': 'opex_engineering', 'expns_structures': 'opex_structures', 'expns_dams': 'opex_dams', 'expns_plant': 'opex_plant', 'expns_misc_plnt': 'opex_misc_plant', 'expns_producton': 'opex_production_before_pumping', 'pumping_expenses': 'opex_pumping', 'tot_prdctn_exns': 'opex_total', 'expns_per_mwh': 'opex_per_mwh', }) .drop_duplicates( subset=["report_year", "utility_id_ferc1", "plant_name_ferc1", "capacity_mw"], keep=False) ) if ferc1_pump_df['construction_type'].isnull().any(): raise AssertionError( "NA values found in construction_type column during FERC 1 pumped storage clean, add string to CONSTRUCTION_TYPE_STRINGS" ) ferc1_pump_df = ferc1_pump_df.replace({'construction_type': 'unknown'}, pd.NA) ferc1_transformed_dfs['plants_pumped_storage_ferc1'] = ferc1_pump_df return ferc1_transformed_dfs def plant_in_service(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 Plant in Service data for loading into PUDL. Re-organizes the original FERC Form 1 Plant in Service data by unpacking the rows as needed on a year by year basis, to organize them into columns. The "columns" in the original FERC Form 1 denote starting balancing, ending balance, additions, retirements, adjustments, and transfers -- these categories are turned into labels in a column called "amount_type". 
Because each row in the transformed table is composed of many individual records (rows) from the original table, row_number can't be part of the record_id, which means they are no longer unique. To infer exactly what record a given piece of data came from, the record_id and the row_map (found in the PUDL package_data directory) can be used. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: The dictionary of the transformed DataFrames. """ pis_df = ( unpack_table( ferc1_df=ferc1_raw_dfs["plant_in_service_ferc1"], table_name="f1_plant_in_srvce", data_rows=slice(None), # Gotta catch 'em all! data_cols=[ "begin_yr_bal", "addition", "retirements", "adjustments", "transfers", "yr_end_bal" ]) .pipe( # Convert top level of column index into a categorical column: cols_to_cats, cat_name="amount_type", col_cats={ "begin_yr_bal": "starting_balance", "addition": "additions", "retirements": "retirements", "adjustments": "adjustments", "transfers": "transfers", "yr_end_bal": "ending_balance", }) .rename_axis(columns=None) .pipe(_clean_cols, "f1_plant_in_srvce") .set_index([ "utility_id_ferc1", "report_year", "amount_type", "record_id"]) .reset_index() ) # Get rid of the columns corresponding to "header" rows in the FERC # form, which should *never* contain data... but in about 2 dozen cases, # they do. See this issue on Github for more information: # https://github.com/catalyst-cooperative/pudl/issues/471 pis_df = pis_df.drop(columns=pis_df.filter(regex=".*_head$").columns) ferc1_transformed_dfs["plant_in_service_ferc1"] = pis_df return ferc1_transformed_dfs def purchased_power(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 purchased power data for loading into PUDL. This table contains data about inter-utility power purchases, including how much electricity was purchased, how much it cost, and who it was purchased from. Unfortunately the field describing which other utility the power was being bought from is poorly standardized, making it difficult to correlate with other data. It will need to be categorized by hand or with some fuzzy matching eventually. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: The dictionary of the transformed DataFrames.
""" # grab table from dictionary of dfs df = ( _clean_cols(ferc1_raw_dfs['purchased_power_ferc1'], 'f1_purchased_pwr') .rename(columns={ 'athrty_co_name': 'seller_name', 'sttstcl_clssfctn': 'purchase_type_code', 'rtsched_trffnbr': 'tariff', 'avgmth_bill_dmnd': 'billing_demand_mw', 'avgmth_ncp_dmnd': 'non_coincident_peak_demand_mw', 'avgmth_cp_dmnd': 'coincident_peak_demand_mw', 'mwh_purchased': 'purchased_mwh', 'mwh_recv': 'received_mwh', 'mwh_delvd': 'delivered_mwh', 'dmnd_charges': 'demand_charges', 'erg_charges': 'energy_charges', 'othr_charges': 'other_charges', 'settlement_tot': 'total_settlement'}) .assign( # Require these columns to numeric, or NaN billing_demand_mw=lambda x: pd.to_numeric( x.billing_demand_mw, errors="coerce"), non_coincident_peak_demand_mw=lambda x: pd.to_numeric( x.non_coincident_peak_demand_mw, errors="coerce"), coincident_peak_demand_mw=lambda x: pd.to_numeric( x.coincident_peak_demand_mw, errors="coerce")) .fillna({ # Replace blanks w/ 0.0 in data columns. "purchased_mwh": 0.0, "received_mwh": 0.0, "delivered_mwh": 0.0, "demand_charges": 0.0, "energy_charges": 0.0, "other_charges": 0.0, "total_settlement": 0.0, }) ) # Reencode the power purchase types: df = ( pudl.metadata.classes.Package.from_resource_ids() .get_resource("purchased_power_ferc1") .encode(df) ) # Drop records containing no useful data and also any completely duplicate # records -- there are 6 in 1998 for utility 238 for some reason... df = ( df.drop_duplicates() .drop(df.loc[((df.purchased_mwh == 0) & (df.received_mwh == 0) & (df.delivered_mwh == 0) & (df.demand_charges == 0) & (df.energy_charges == 0) & (df.other_charges == 0) & (df.total_settlement == 0)), :].index) ) ferc1_transformed_dfs['purchased_power_ferc1'] = df return ferc1_transformed_dfs def accumulated_depreciation(ferc1_raw_dfs, ferc1_transformed_dfs): """Transforms FERC Form 1 depreciation data for loading into PUDL. This information is organized by FERC account, with each line of the FERC Form 1 having a different descriptive identifier like 'balance_end_of_year' or 'transmission'. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database. ferc1_transformed_dfs (dict): A dictionary of DataFrames to be transformed. Returns: dict: The dictionary of the transformed DataFrames. """ # grab table from dictionary of dfs ferc1_apd_df = ferc1_raw_dfs['accumulated_depreciation_ferc1'] ferc1_acct_apd = FERC_DEPRECIATION_LINES.drop( ['ferc_account_description'], axis=1) ferc1_acct_apd.dropna(inplace=True) ferc1_acct_apd['row_number'] = ferc1_acct_apd['row_number'].astype(int) ferc1_accumdepr_prvsn_df = pd.merge(ferc1_apd_df, ferc1_acct_apd, how='left', on='row_number') ferc1_accumdepr_prvsn_df = _clean_cols( ferc1_accumdepr_prvsn_df, 'f1_accumdepr_prvsn') ferc1_accumdepr_prvsn_df.rename(columns={ # FERC1 DB PUDL DB 'total_cde': 'total'}, inplace=True) ferc1_transformed_dfs['accumulated_depreciation_ferc1'] = ferc1_accumdepr_prvsn_df return ferc1_transformed_dfs def transform( ferc1_raw_dfs, ferc1_tables=DataSource.from_id("ferc1").get_resource_ids() ): """Transforms FERC 1. Args: ferc1_raw_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a table from the FERC Form 1 DBC database ferc1_tables (tuple): A tuple containing the set of tables which have been successfully integrated into PUDL Returns: dict: A dictionary of the transformed DataFrames. 
""" ferc1_tfr_funcs = { # fuel must come before steam b/c fuel proportions are used to aid in # plant # ID assignment. 'fuel_ferc1': fuel, 'plants_steam_ferc1': plants_steam, 'plants_small_ferc1': plants_small, 'plants_hydro_ferc1': plants_hydro, 'plants_pumped_storage_ferc1': plants_pumped_storage, 'plant_in_service_ferc1': plant_in_service, 'purchased_power_ferc1': purchased_power, 'accumulated_depreciation_ferc1': accumulated_depreciation } # create an empty ditctionary to fill up through the transform fuctions ferc1_tfr_dfs = {} # for each ferc table, for table in ferc1_tfr_funcs: if table in ferc1_tables: logger.info( f"Transforming raw FERC Form 1 dataframe for " f"loading into {table}") ferc1_tfr_funcs[table](ferc1_raw_dfs, ferc1_tfr_dfs) # convert types and return: return { name: convert_cols_dtypes(df, data_source="ferc1") for name, df in ferc1_tfr_dfs.items() } ############################################################################### # Identifying FERC Plants ############################################################################### # Sadly FERC doesn't provide any kind of real IDs for the plants that report to # them -- all we have is their names (a freeform string) and the data that is # reported alongside them. This is often enough information to be able to # recognize which records ought to be associated with each other year to year # to create a continuous time series. However, we want to do that # programmatically, which means using some clustering / categorization tools # from scikit-learn class FERCPlantClassifier(BaseEstimator, ClassifierMixin): """A classifier for identifying FERC plant time series in FERC Form 1 data. We want to be able to give the classifier a FERC plant record, and get back the group of records(or the ID of the group of records) that it ought to be part of. There are hundreds of different groups of records, and we can only know what they are by looking at the whole dataset ahead of time. This is the "fitting" step, in which the groups of records resulting from a particular set of model parameters(e.g. the weights that are attributes of the class) are generated. Once we have that set of record categories, we can test how well the classifier performs, by checking it against test / training data which we have already classified by hand. The test / training set is a list of lists of unique FERC plant record IDs(each record ID is the concatenation of: report year, respondent id, supplement number, and row number). It could also be stored as a dataframe where each column is associated with a year of data(some of which could be empty). Not sure what the best structure would be. If it's useful, we can assign each group a unique ID that is the time ordered concatenation of each of the constituent record IDs. Need to understand what the process for checking the classification of an input record looks like. To score a given classifier, we can look at what proportion of the records in the test dataset are assigned to the same group as in our manual classification of those records. There are much more complicated ways to do the scoring too... but for now let's just keep it as simple as possible. """ def __init__(self, min_sim=0.75, plants_df=None): """ Initialize the classifier. Args: min_sim : Number between 0.0 and 1.0, indicating the minimum value of cosine similarity that we are willing to accept as indicating two records are part of the same plant record time series. 
All entries in the pairwise similarity matrix below this value will be zeroed out. plants_df : The entire FERC Form 1 plant table as a dataframe. Needed in order to calculate the distance metrics between all of the records so we can group the plants in the fit() step, so we can check how well they are categorized later... Todo: Zane revisit plants_df """ self.min_sim = min_sim self.plants_df = plants_df self._years = self.plants_df.report_year.unique() def fit(self, X, y=None): # noqa: N803 Canonical capital letter... """ Use weighted FERC plant features to group records into time series. The fit method takes the vectorized, normalized, weighted FERC plant features (X) as input, calculates the pairwise cosine similarity matrix between all records, and groups the records in their best time series. The similarity matrix and best time series are stored as data members in the object for later use in scoring & predicting. This isn't quite the way a fit method would normally work. Args: X (): a sparse matrix of size n_samples x n_features. y (): Returns: pandas.DataFrame: TODO: Zane revisit args and returns """ self._cossim_df = pd.DataFrame(cosine_similarity(X)) self._best_of = self._best_by_year() # Make the best match indices integers rather than floats w/ NA values. self._best_of[self._years] = self._best_of[self._years].fillna( -1).astype(int) return self def transform(self, X, y=None): # noqa: N803 """Passthrough transform method -- just returns self.""" return self def predict(self, X, y=None): # noqa: N803 """ Identify time series of similar records to input record_ids. Given a one-dimensional dataframe X, containing FERC record IDs, return a dataframe in which each row corresponds to one of the input record_id values (ordered as the input was ordered), with each column corresponding to one of the years worth of data. Values in the returned dataframe are the FERC record_ids of the record most similar to the input record within that year. Some of them may be null, if there was no sufficiently good match. Row index is the seed record IDs. Column index is years. TODO: * This method is hideously inefficient. It should be vectorized. * There's a line that throws a FutureWarning that needs to be fixed. """ try: getattr(self, "_cossim_df") except AttributeError: raise RuntimeError( "You must train classifer before predicting data!") tmp_best = pd.concat([ self._best_of.loc[:, ["record_id"] + list(self._years)], pd.DataFrame(data=[""], index=[-1], columns=["record_id"]) ]) out_dfs = [] # For each record_id we've been given: for x in X: # Find the index associated with the record ID we are predicting # a grouping for: idx = tmp_best[tmp_best.record_id == x].index.values[0] # Mask the best_of dataframe, keeping only those entries where # the index of the chosen record_id appears -- this results in a # huge dataframe almost full of NaN values. w_m = ( tmp_best[self._years][tmp_best[self._years] == idx] # Grab the index values of the rows in the masked dataframe which # are NOT all NaN -- these are the indices of the *other* records # which found the record x to be one of their best matches. .dropna(how="all").index.values ) # Now look up the indices of the records which were found to be # best matches to the record x. b_m = tmp_best.loc[idx, self._years].astype(int) # Here we require that there is no conflict between the two sets # of indices -- that every time a record shows up in a grouping, # that grouping is either the same, or a subset of the other # groupings that it appears in. 
When no sufficiently good match # is found the "index" in the _best_of array is set to -1, so # requiring that the b_m value be >=0 screens out those no-match # cases. This is okay -- we're just trying to require that the # groupings be internally self-consistent, not that they are # completely identical. Being flexible on this dramatically # increases the number of records that get assigned a plant ID. if np.array_equiv(w_m, b_m[b_m >= 0].values): # This line is causing a warning. In cases where there are # some years no sufficiently good match exists, and so b_m # doesn't contain an index. Instead, it has a -1 sentinel # value, which isn't a label for which a record exists, which # upsets .loc. Need to find some way around this... but for # now it does what we want. We could use .iloc instead, but # then the -1 sentinel value maps to the last entry in the # dataframe, which also isn't what we want. Blargh. new_grp = tmp_best.loc[b_m, "record_id"] # Stack the new list of record_ids on our output DataFrame: out_dfs.append( pd.DataFrame( data=new_grp.values.reshape(1, len(self._years)), index=pd.Index( [tmp_best.loc[idx, "record_id"]], name="seed_id"), columns=self._years ) ) return pd.concat(out_dfs) def score(self, X, y=None): # noqa: N803 """Scores a collection of FERC plant categorizations. For every record ID in X, predict its record group and calculate a metric of similarity between the prediction and the "ground truth" group that was passed in for that value of X. Args: X (pandas.DataFrame): an n_samples x 1 pandas dataframe of FERC Form 1 record IDs. y (pandas.DataFrame): a dataframe of "ground truth" FERC Form 1 record groups, corresponding to the list record IDs in X Returns: numpy.ndarray: The average of all the similarity metrics as the score. """ scores = [] for true_group in y: true_group = str.split(true_group, sep=',') true_group = [s for s in true_group if s != ''] predicted_groups = self.predict(pd.DataFrame(true_group)) for rec_id in true_group: sm = SequenceMatcher(None, true_group, predicted_groups.loc[rec_id]) scores = scores + [sm.ratio()] return np.mean(scores) def _best_by_year(self): """Finds the best match for each plant record in each other year.""" # only keep similarity matrix entries above our minimum threshold: out_df = self.plants_df.copy() sim_df = self._cossim_df[self._cossim_df >= self.min_sim] # Add a column for each of the years, in which we will store indices # of the records which best match the record in question: for yr in self._years: newcol = yr out_df[newcol] = -1 # seed_yr is the year we are matching *from* -- we do the entire # matching process from each year, since it may not be symmetric: for seed_yr in self._years: seed_idx = self.plants_df.index[ self.plants_df.report_year == seed_yr] # match_yr is all the other years, in which we are finding the best # match for match_yr in self._years: best_of_yr = match_yr match_idx = self.plants_df.index[ self.plants_df.report_year == match_yr] # For each record specified by seed_idx, obtain the index of # the record within match_idx that that is the most similar. best_idx = sim_df.iloc[seed_idx, match_idx].idxmax(axis=1) out_df.iloc[seed_idx, out_df.columns.get_loc(best_of_yr)] = best_idx return out_df def make_ferc1_clf(plants_df, ngram_min=2, ngram_max=10, min_sim=0.75, plant_name_ferc1_wt=2.0, plant_type_wt=2.0, construction_type_wt=1.0, capacity_mw_wt=1.0, construction_year_wt=1.0, utility_id_ferc1_wt=1.0, fuel_fraction_wt=1.0): """ Create a FERC Plant Classifier using several weighted features. 
Given a FERC steam plants dataframe plants_df, which also includes fuel consumption information, transform a selection of useful columns into features suitable for use in calculating inter-record cosine similarities. Individual features are weighted according to the keyword arguments. Features include: * plant_name (via TF-IDF, with ngram_min and ngram_max as parameters) * plant_type (OneHot encoded categorical feature) * construction_type (OneHot encoded categorical feature) * capacity_mw (MinMax scaled numerical feature) * construction year (OneHot encoded categorical feature) * utility_id_ferc1 (OneHot encoded categorical feature) * fuel_fraction_mmbtu (several MinMax scaled numerical columns, which are normalized and treated as a single feature.) This feature matrix is then used to instantiate a FERCPlantClassifier. The combination of the ColumnTransformer and FERCPlantClassifier are combined in a sklearn Pipeline, which is returned by the function. Arguments: ngram_min (int): the minimum lengths to consider in the vectorization of the plant_name feature. ngram_max (int): the maximum n-gram lengths to consider in the vectorization of the plant_name feature. min_sim (float): the minimum cosine similarity between two records that can be considered a "match" (a number between 0.0 and 1.0). plant_name_ferc1_wt (float): weight used to determine the relative importance of each of the features in the feature matrix used to calculate the cosine similarity between records. Used to scale each individual feature before the vectors are normalized. plant_type_wt (float): weight used to determine the relative importance of each of the features in the feature matrix used to calculate the cosine similarity between records. Used to scale each individual feature before the vectors are normalized. construction_type_wt (float): weight used to determine the relative importance of each of the features in the feature matrix used to calculate the cosine similarity between records. Used to scale each individual feature before the vectors are normalized. capacity_mw_wt (float):weight used to determine the relative importance of each of the features in the feature matrix used to calculate the cosine similarity between records. Used to scale each individual feature before the vectors are normalized. construction_year_wt (float): weight used to determine the relative importance of each of the features in the feature matrix used to calculate the cosine similarity between records. Used to scale each individual feature before the vectors are normalized. utility_id_ferc1_wt (float): weight used to determine the relative importance of each of the features in the feature matrix used to calculate the cosine similarity between records. Used to scale each individual feature before the vectors are normalized. fuel_fraction_wt (float): weight used to determine the relative importance of each of the features in the feature matrix used to calculate the cosine similarity between records. Used to scale each individual feature before the vectors are normalized. Returns: sklearn.pipeline.Pipeline: an sklearn Pipeline that performs reprocessing and classification with a FERCPlantClassifier object. """ # Make a list of all the fuel fraction columns for use as one feature. 
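# (These fuel fraction columns are produced upstream by fuel_by_plant_ferc1(),
# so the exact set of fuels present is data dependent -- hence the regex filter
# on the next line.)
# Sketch of how the returned pipeline gets used in _plants_steam_assign_plant_ids()
# above (names abbreviated for illustration):
#   clf = make_ferc1_clf(steam_df)
#   clf = clf.fit_transform(steam_df)
#   record_groups = clf.predict(steam_df.record_id)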
fuel_cols = list(plants_df.filter(regex='.*_fraction_mmbtu$').columns) ferc1_pipe = Pipeline([ ('preprocessor', ColumnTransformer( transformers=[ ('plant_name_ferc1', TfidfVectorizer( analyzer='char', ngram_range=(ngram_min, ngram_max)), 'plant_name_ferc1'), ('plant_type', OneHotEncoder( categories='auto'), ['plant_type']), ('construction_type', OneHotEncoder( categories='auto'), ['construction_type']), ('capacity_mw', MinMaxScaler(), ['capacity_mw']), ('construction_year', OneHotEncoder( categories='auto'), ['construction_year']), ('utility_id_ferc1', OneHotEncoder( categories='auto'), ['utility_id_ferc1']), ('fuel_fraction_mmbtu', Pipeline([ ('scaler', MinMaxScaler()), ('norm', Normalizer()) ]), fuel_cols), ], transformer_weights={ 'plant_name_ferc1': plant_name_ferc1_wt, 'plant_type': plant_type_wt, 'construction_type': construction_type_wt, 'capacity_mw': capacity_mw_wt, 'construction_year': construction_year_wt, 'utility_id_ferc1': utility_id_ferc1_wt, 'fuel_fraction_mmbtu': fuel_fraction_wt, }) ), ('classifier', pudl.transform.ferc1.FERCPlantClassifier( min_sim=min_sim, plants_df=plants_df)) ]) return ferc1_pipe def fuel_by_plant_ferc1(fuel_df, thresh=0.5): """Calculates useful FERC Form 1 fuel metrics on a per plant-year basis. Each record in the FERC Form 1 corresponds to a particular type of fuel. Many plants -- especially coal plants -- use more than one fuel, with gas and/or diesel serving as startup fuels. In order to be able to classify the type of plant based on relative proportions of fuel consumed or fuel costs it is useful to aggregate these per-fuel records into a single record for each plant. Fuel cost (in nominal dollars) and fuel heat content (in mmBTU) are calculated for each fuel based on the cost and heat content per unit, and the number of units consumed, and then summed by fuel type (there can be more than one record for a given type of fuel in each plant because we are simplifying the fuel categories). The per-fuel records are then pivoted to create one column per fuel type. The total is summed and stored separately, and the individual fuel costs & heat contents are divided by that total, to yield fuel proportions. Based on those proportions and a minimum threshold that's passed in, a "primary" fuel type is then assigned to the plant-year record and given a string label. Args: fuel_df (pandas.DataFrame): Pandas DataFrame resembling the post-transform result for the fuel_ferc1 table. thresh (float): A value between 0.5 and 1.0 indicating the minimum fraction of overall heat content that must have been provided by a fuel in a plant-year for it to be considered the "primary" fuel for the plant in that year. Default value: 0.5. Returns: pandas.DataFrame: A DataFrame with a single record for each plant-year, including the columns required to merge it with the plants_steam_ferc1 table/DataFrame (report_year, utility_id_ferc1, and plant_name) as well as totals for fuel mmbtu consumed in that plant-year, and the cost of fuel in that year, the proportions of heat content and fuel costs for each fuel in that year, and a column that labels the plant's primary fuel for that year. Raises: AssertionError: If the DataFrame input does not have the columns required to run the function. 
""" keep_cols = [ 'report_year', # key 'utility_id_ferc1', # key 'plant_name_ferc1', # key 'fuel_type_code_pudl', # pivot 'fuel_consumed_units', # value 'fuel_mmbtu_per_unit', # value 'fuel_cost_per_unit_burned', # value ] # Ensure that the dataframe we've gotten has all the information we need: for col in keep_cols: if col not in fuel_df.columns: raise AssertionError( f"Required column {col} not found in input fuel_df." ) # Calculate per-fuel derived values and add them to the DataFrame df = ( # Really there should *not* be any duplicates here but... there's a # bug somewhere that introduces them into the fuel_ferc1 table. fuel_df[keep_cols].drop_duplicates(). # Calculate totals for each record based on per-unit values: assign(fuel_mmbtu=lambda x: x.fuel_consumed_units * x.fuel_mmbtu_per_unit). assign(fuel_cost=lambda x: x.fuel_consumed_units * x.fuel_cost_per_unit_burned). # Drop the ratios and heterogeneous fuel "units" drop(['fuel_mmbtu_per_unit', 'fuel_cost_per_unit_burned', 'fuel_consumed_units'], axis=1). # Group by the keys and fuel type, and sum: groupby(['utility_id_ferc1', 'plant_name_ferc1', 'report_year', 'fuel_type_code_pudl']). agg(sum).reset_index(). # Set the index to the keys, and pivot to get per-fuel columns: set_index(['utility_id_ferc1', 'plant_name_ferc1', 'report_year']). pivot(columns='fuel_type_code_pudl').fillna(0.0) ) # undo pivot. Could refactor this old function but out of scope for now (fixing a pandas API deprecation) plant_year_totals = df.stack('fuel_type_code_pudl').groupby(level=[0, 1, 2]).sum() # Calculate total heat content burned for each plant, and divide it out mmbtu_group = ( pd.merge( # Sum up all the fuel heat content, and divide the individual fuel # heat contents by it (they are all contained in single higher # level group of columns labeled fuel_mmbtu) df.loc[:, 'fuel_mmbtu'].div( df.loc[:, 'fuel_mmbtu'].sum(axis=1), axis='rows'), # Merge that same total into the dataframe separately as well. plant_year_totals.loc[:, 'fuel_mmbtu'], right_index=True, left_index=True). rename(columns=lambda x: re.sub(r'$', '_fraction_mmbtu', x)). rename(columns=lambda x: re.sub(r'_mmbtu_fraction_mmbtu$', '_mmbtu', x)) ) # Calculate total fuel cost for each plant, and divide it out cost_group = ( pd.merge( # Sum up all the fuel costs, and divide the individual fuel # costs by it (they are all contained in single higher # level group of columns labeled fuel_cost) df.loc[:, 'fuel_cost'].div( df.loc[:, 'fuel_cost'].sum(axis=1), axis='rows'), # Merge that same total into the dataframe separately as well. plant_year_totals.loc[:, 'fuel_cost'], right_index=True, left_index=True). rename(columns=lambda x: re.sub(r'$', '_fraction_cost', x)). rename(columns=lambda x: re.sub(r'_cost_fraction_cost$', '_cost', x)) ) # Re-unify the cost and heat content information: df = pd.merge(mmbtu_group, cost_group, left_index=True, right_index=True).reset_index() # Label each plant-year record by primary fuel: for fuel_str in FUEL_STRINGS: try: mmbtu_mask = df[f'{fuel_str}_fraction_mmbtu'] > thresh df.loc[mmbtu_mask, 'primary_fuel_by_mmbtu'] = fuel_str except KeyError: pass try: cost_mask = df[f'{fuel_str}_fraction_cost'] > thresh df.loc[cost_mask, 'primary_fuel_by_cost'] = fuel_str except KeyError: pass df[['primary_fuel_by_cost', 'primary_fuel_by_mmbtu']] = df[[ 'primary_fuel_by_cost', 'primary_fuel_by_mmbtu']].fillna('') return df
catalyst-cooperative/pudl
src/pudl/transform/ferc1.py
Python
mit
103623
[ "BLAST" ]
967b64130d65a6c0bf0386767bcd17daa1dea6392de3c1245279145fcb6382f7
#!/usr/bin/env python import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable from random import shuffle __all__ = ['ResidualBlock', 'ResNet', 'StackRegressive'] torch.set_default_tensor_type('torch.DoubleTensor') # 3x3 Convolution def conv3x3(in_channels, out_channels, stride=1): return nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False) # Residual Block class ResidualBlock(nn.Module): def __init__(self, in_channels, out_channels, stride=1, downsample=None): super(ResidualBlock, self).__init__() self.conv1 = conv3x3(in_channels, out_channels, stride) self.bn1 = nn.BatchNorm2d(out_channels) self.relu = nn.ReLU(inplace=True) self.conv2 = conv3x3(out_channels, out_channels) self.bn2 = nn.BatchNorm2d(out_channels) self.downsample = downsample def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample: residual = self.downsample(x) out += residual out = self.relu(out) return out # ResNet Module class ResNet(nn.Module): def __init__(self, block, layers, num_classes=2): super(ResNet, self).__init__() self.in_channels = 16 self.conv = conv3x3(3, 16) self.bn = nn.BatchNorm2d(16) self.relu = nn.ReLU(inplace=True) self.layer1 = self.make_layer(block, 16, layers[0]) self.layer2 = self.make_layer(block, 32, layers[0], 2) self.layer3 = self.make_layer(block, 64, layers[1], 2) self.avg_pool = nn.AvgPool2d(8) self.fc = nn.Linear(64, num_classes) self.saved_attention = [] self.rewards = [] def make_layer(self, block, out_channels, blocks, stride=1): downsample = None if (stride != 1) or (self.in_channels != out_channels): downsample = nn.Sequential( conv3x3(self.in_channels, out_channels, stride=stride), nn.BatchNorm2d(out_channels)) layers = [] layers.append(block(self.in_channels, out_channels, stride, downsample)) self.in_channels = out_channels for i in range(1, blocks): layers.append(block(out_channels, out_channels)) return nn.Sequential(*layers) def forward(self, x): out = self.conv(x) out = self.bn(out) out = self.relu(out) out = self.layer1(out) out = self.layer2(out) out = self.layer3(out) out = self.avg_pool(out) out = out.view(out.size(0), -1) out = self.fc(out) return out class StackRegressive(nn.Module): def __init__(self, **kwargs): super(StackRegressive, self).__init__() """ Following the interpretable learning from self-driving examples: https://arxiv.org/pdf/1703.10631.pdf we can extract the last feature cube x_t from the resnet model as a set of L = W x H vectors of depth D. 
Since these share the same feature extraction layers, only the final regression layers need to be recomputed after computing the classification network We then stack an LSTM module on this layer to obtain the detection predictions The number of outputs is thus given: First 4 cols represent top and lower coordinates of face boxes, Followed by 2 cols belonging to left eye pixel coords, last 2 cols are the right eye coords """ # Backprop Through Time (Recurrent Layer) Params self.noutputs = kwargs['noutputs'] self.num_layers = kwargs['numLayers'] self.input_size = kwargs['inputSize'] self.hidden_size = kwargs['nHidden'] self.batch_size = kwargs['batchSize'] self.noutputs = kwargs['noutputs'] self.ship2gpu = kwargs['ship2gpu'] """ Now stack an LSTM on top of the convnet to generate bounding box predictions Since last conv layer in classifier is a 64-layer, we initiate our LSTM with a 64-neuron input layer """ #define the recurrent connections self.lstm1 = nn.LSTM(self.input_size, self.hidden_size[0], self.num_layers, bias=False, batch_first=False, dropout=0.3) self.lstm2 = nn.LSTM(self.hidden_size[0], self.hidden_size[1], self.num_layers, bias=False, batch_first=False, dropout=0.3) self.fc = nn.Linear(self.hidden_size[1], self.noutputs) def forward(self, x): nBatch = x.size(0) # Forward propagate RNN layer 1 out, state_0 = self.lstm1(x) # Forward propagate RNN layer 2 out, state_1 = self.lstm2(out) # Decode hidden state of last time step out = self.fc(out[:, -1, :]) out = out.view(nBatch, -1) return out class SimpleRegressor(nn.Module): def __init__(self, **kwargs): super(SimpleRegressor, self).__init__() self.criterion = nn.MSELoss(size_average=False) # Backprop Through Time (Recurrent Layer) Params self.noutputs = kwargs['noutputs'] self.input_size = kwargs['inputSize'] self.hidden_size = kwargs['nHidden'] self.batch_size = kwargs['batchSize'] self.noutputs = kwargs['noutputs'] self.ship2gpu = kwargs['ship2gpu'] #define the recurrent connections self.fc1 = nn.Linear(self.input_size, self.hidden_size[0]) self.fc2 = nn.Linear(self.hidden_size[0], self.noutputs) def forward(self, x): out = F.relu(self.fc1(x)) out = self.fc2(out) return out class RecurrentModel(nn.Module): def __init__(self, **kwargs): super(RecurrentModel, self).__init__() ''' See Sharma, S., Kiros, R., & Salakhutdinov, R. Workshop track -ICLR 2016 ACTION RECOGNITION USING VISUAL ATTENTION. 
Retrieved from https://arxiv.org/pdf/1511.04119.pdf ''' self.criterion = nn.MSELoss(size_average=False) # Backprop Through Time (Recurrent Layer) Params self.noutputs = kwargs['noutputs'] self.num_layers = kwargs['numLayers'] self.input_size = kwargs['inputSize'] self.hidden_size = kwargs['nHidden'] self.batch_size = kwargs['batchSize'] self.noutputs = kwargs['noutputs'] self.ship2gpu = kwargs['ship2gpu'] #define the recurrent connections self.lstm1 = nn.LSTM(self.input_size, self.hidden_size[0], self.num_layers, bias=False, batch_first=False, dropout=0.3) self.lstm2 = nn.LSTM(self.hidden_size[0], self.hidden_size[1], self.num_layers, bias=False, batch_first=False, dropout=0.3) self.lstm3 = nn.LSTM(self.hidden_size[1], self.hidden_size[2], self.num_layers, bias=False, batch_first=False, dropout=0.3) def forward(self, x): # Forward propagate RNN layer 1 out, state_0 = self.lstm1(x) # Forward propagate RNN layer 2 out, state_1 = self.lstm2(out) # # Forward propagate RNN layer 3 out, state_2 = self.lstm3(out) #at this point, out will be seqLength x batchSize x PooledSize # Decode hidden state of last time step # out = out[-1:, :,] # out = out.view(-1) # out will be seqLength x PooledSize return out, state_2
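# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the original module).
# The input resolution, block counts and keyword values below are
# assumptions chosen to satisfy the constructor signatures above; the real
# training scripts in this repository may use different settings.
if __name__ == '__main__':
    # Classifier: two residual stages of three blocks each, binary output.
    resnet = ResNet(ResidualBlock, [3, 3], num_classes=2)
    images = Variable(torch.randn(4, 3, 32, 32))  # NCHW batch of RGB crops
    logits = resnet(images)

    # Regressor stacked on the 64-dimensional pooled ResNet feature.
    regressor = StackRegressive(inputSize=64, nHidden=[64, 32], numLayers=2,
                                noutputs=8, batchSize=4, ship2gpu=False)
    # forward() indexes dimension 0 of its input as the batch dimension.
    feats = Variable(torch.randn(4, 1, 64))
    boxes = regressor(feats)
    print(logits.size())  # expected: torch.Size([4, 2])
    print(boxes.size())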
lakehanne/ensenso
ensenso_detect/manikin/model.py
Python
mit
7606
[ "NEURON" ]
b19777cd09b7a3cd857f08398ce1f7ea915d3ac0709a17afcb15abdb099a6254
# -*- coding: utf-8 -*- # Copyright (C) 2003-2007 CAMP # Copyright (C) 2007-2008 CAMd # Please see the accompanying LICENSE file for further information. """This module defines a PAW-class. The central object that glues everything together!""" import numpy as np from ase.units import Bohr, Hartree from ase.dft.kpoints import monkhorst_pack from ase.calculators.calculator import kptdensity2monkhorstpack import gpaw.io import gpaw.mpi as mpi import gpaw.occupations as occupations from gpaw import dry_run, memory_estimate_depth, KohnShamConvergenceError from gpaw.hooks import hooks from gpaw.density import RealSpaceDensity from gpaw.eigensolvers import get_eigensolver from gpaw.band_descriptor import BandDescriptor from gpaw.grid_descriptor import GridDescriptor from gpaw.kohnsham_layouts import get_KohnSham_layouts from gpaw.hamiltonian import RealSpaceHamiltonian from gpaw.utilities.timing import Timer from gpaw.xc import XC from gpaw.xc.sic import SIC from gpaw.kpt_descriptor import KPointDescriptor from gpaw.wavefunctions.base import EmptyWaveFunctions from gpaw.wavefunctions.fd import FDWaveFunctions from gpaw.wavefunctions.lcao import LCAOWaveFunctions from gpaw.wavefunctions.pw import PW, ReciprocalSpaceDensity, \ ReciprocalSpaceHamiltonian from gpaw.utilities.memory import MemNode, maxrss from gpaw.parameters import InputParameters from gpaw.setup import Setups from gpaw.output import PAWTextOutput from gpaw.scf import SCFLoop from gpaw.forces import ForceCalculator from gpaw.utilities.gpts import get_number_of_grid_points class PAW(PAWTextOutput): """This is the main calculation object for doing a PAW calculation.""" timer_class = Timer def __init__(self, filename=None, **kwargs): """ASE-calculator interface. The following parameters can be used: nbands, xc, kpts, spinpol, gpts, h, charge, usesymm, width, mixer, hund, lmax, fixdensity, convergence, txt, parallel, communicator, dtype, softgauss and stencils. If you don't specify any parameters, you will get: Defaults: neutrally charged, LDA, gamma-point calculation, a reasonable grid-spacing, zero Kelvin electronic temperature, and the number of bands will be equal to the number of atomic orbitals present in the setups. Only occupied bands are used in the convergence decision. The calculation will be spin-polarized if and only if one or more of the atoms have non-zero magnetic moments. Text output will be written to standard output. For a non-gamma point calculation, the electronic temperature will be 0.1 eV (energies are extrapolated to zero Kelvin) and all symmetries will be used to reduce the number of **k**-points.""" PAWTextOutput.__init__(self) self.grid_descriptor_class = GridDescriptor self.input_parameters = InputParameters() self.timer = self.timer_class() self.scf = None self.forces = ForceCalculator(self.timer) self.stress_vv = None self.dipole_v = None self.magmom_av = None self.wfs = EmptyWaveFunctions() self.occupations = None self.density = None self.hamiltonian = None self.atoms = None self.iter = 0 self.initialized = False self.nbands_parallelization_adjustment = None # Somehow avoid this? 
# Possibly read GPAW keyword arguments from file: if filename is not None and filename.endswith('.gkw'): from gpaw.utilities.kwargs import load parameters = load(filename) parameters.update(kwargs) kwargs = parameters filename = None # XXX if filename is not None: comm = kwargs.get('communicator', mpi.world) reader = gpaw.io.open(filename, 'r', comm) self.atoms = gpaw.io.read_atoms(reader) par = self.input_parameters par.read(reader) self.set(**kwargs) if filename is not None: # Setups are not saved in the file if the setups were not loaded # *from* files in the first place if par.setups is None: if par.idiotproof: raise RuntimeError('Setups not specified in file. Use ' 'idiotproof=False to proceed anyway.') else: par.setups = {None: 'paw'} if par.basis is None: if par.idiotproof: raise RuntimeError('Basis not specified in file. Use ' 'idiotproof=False to proceed anyway.') else: par.basis = {} self.initialize() self.read(reader) self.print_cell_and_parameters() self.observers = [] def read(self, reader): gpaw.io.read(self, reader) def set(self, **kwargs): """Change parameters for calculator. Examples:: calc.set(xc='PBE') calc.set(nbands=20, kpts=(4, 1, 1)) """ p = self.input_parameters if (kwargs.get('h') is not None) and (kwargs.get('gpts') is not None): raise TypeError("""You can't use both "gpts" and "h"!""") if 'h' in kwargs: p['gpts'] = None if 'gpts' in kwargs: p['h'] = None # Special treatment for dictionary parameters: for name in ['convergence', 'parallel']: if kwargs.get(name) is not None: tmp = p[name] for key in kwargs[name]: if not key in tmp: raise KeyError('Unknown subparameter "%s" in ' 'dictionary parameter "%s"' % (key, name)) tmp.update(kwargs[name]) kwargs[name] = tmp self.initialized = False for key in kwargs: if key == 'basis' and p['mode'] == 'fd': continue if key == 'eigensolver': self.wfs.set_eigensolver(None) if key in ['fixmom', 'mixer', 'verbose', 'txt', 'hund', 'random', 'eigensolver', 'idiotproof', 'notify', 'usefractrans']: continue if key in ['convergence', 'fixdensity', 'maxiter']: self.scf = None continue # More drastic changes: self.scf = None self.wfs.set_orthonormalized(False) if key in ['lmax', 'width', 'stencils', 'external', 'xc', 'poissonsolver', 'occupations']: self.hamiltonian = None self.occupations = None elif key in ['charge']: self.hamiltonian = None self.density = None self.wfs = EmptyWaveFunctions() self.occupations = None elif key in ['kpts', 'nbands', 'usesymm']: self.wfs = EmptyWaveFunctions() self.occupations = None elif key in ['h', 'gpts', 'setups', 'spinpol', 'realspace', 'parallel', 'communicator', 'dtype', 'mode']: self.density = None self.occupations = None self.hamiltonian = None self.wfs = EmptyWaveFunctions() elif key in ['basis']: self.wfs = EmptyWaveFunctions() elif key in ['parsize', 'parsize_bands', 'parstride_bands']: name = {'parsize': 'domain', 'parsize_bands': 'band', 'parstride_bands': 'stridebands'}[key] raise DeprecationWarning( 'Keyword argument has been moved ' + "to the 'parallel' dictionary keyword under '%s'." % name) else: raise TypeError("Unknown keyword argument: '%s'" % key) p.update(kwargs) def calculate(self, atoms=None, converge=False, force_call_to_set_positions=False): """Update PAW calculaton if needed. 
Returns True/False whether a calculation was performed or not.""" self.timer.start('Initialization') if atoms is None: atoms = self.atoms if self.atoms is None: # First time: self.initialize(atoms) self.set_positions(atoms) elif (len(atoms) != len(self.atoms) or (atoms.get_atomic_numbers() != self.atoms.get_atomic_numbers()).any() or (atoms.get_initial_magnetic_moments() != self.atoms.get_initial_magnetic_moments()).any() or (atoms.get_cell() != self.atoms.get_cell()).any() or (atoms.get_pbc() != self.atoms.get_pbc()).any()): # Drastic changes: self.wfs = EmptyWaveFunctions() self.occupations = None self.density = None self.hamiltonian = None self.scf = None self.initialize(atoms) self.set_positions(atoms) elif not self.initialized: self.initialize(atoms) self.set_positions(atoms) elif (atoms.get_positions() != self.atoms.get_positions()).any(): self.density.reset() self.set_positions(atoms) elif not self.scf.converged: # Do not call scf.check_convergence() here as it overwrites # scf.converged, and setting scf.converged is the only # 'practical' way for a user to force the calculation to proceed self.set_positions(atoms) elif force_call_to_set_positions: self.set_positions(atoms) self.timer.stop('Initialization') if self.scf.converged: return False else: self.print_cell_and_parameters() self.timer.start('SCF-cycle') for iter in self.scf.run(self.wfs, self.hamiltonian, self.density, self.occupations): self.iter = iter self.call_observers(iter) self.print_iteration(iter) self.timer.stop('SCF-cycle') if self.scf.converged: self.call_observers(iter, final=True) self.print_converged(iter) if 'converged' in hooks: hooks['converged'](self) elif converge: if 'not_converged' in hooks: hooks['not_converged'](self) self.txt.write(oops) raise KohnShamConvergenceError( 'Did not converge! See text output for help.') return True def initialize_positions(self, atoms=None): """Update the positions of the atoms.""" if atoms is None: atoms = self.atoms else: # Save the state of the atoms: self.atoms = atoms.copy() self.check_atoms() spos_ac = atoms.get_scaled_positions() % 1.0 self.wfs.set_positions(spos_ac) self.density.set_positions(spos_ac, self.wfs.rank_a) self.hamiltonian.set_positions(spos_ac, self.wfs.rank_a) return spos_ac def set_positions(self, atoms=None): """Update the positions of the atoms and initialize wave functions.""" spos_ac = self.initialize_positions(atoms) self.wfs.initialize(self.density, self.hamiltonian, spos_ac) self.wfs.eigensolver.reset() self.scf.reset() self.forces.reset() self.stress_vv = None self.dipole_v = None self.magmom_av = None self.print_positions() def initialize(self, atoms=None): """Inexpensive initialization.""" if atoms is None: atoms = self.atoms else: # Save the state of the atoms: self.atoms = atoms.copy() par = self.input_parameters world = par.communicator if world is None: world = mpi.world elif hasattr(world, 'new_communicator'): # Check for whether object has correct type already # # Using isinstance() is complicated because of all the # combinations, serial/parallel/debug... 
pass else: # world should be a list of ranks: world = mpi.world.new_communicator(np.asarray(world)) self.wfs.world = world self.set_text(par.txt, par.verbose) natoms = len(atoms) cell_cv = atoms.get_cell() / Bohr pbc_c = atoms.get_pbc() Z_a = atoms.get_atomic_numbers() magmom_av = atoms.get_initial_magnetic_moments() # Generate new xc functional only when it is reset by set if self.hamiltonian is None or self.hamiltonian.xc is None: if isinstance(par.xc, str): xc = XC(par.xc) else: xc = par.xc else: xc = self.hamiltonian.xc mode = par.mode if xc.orbital_dependent and mode == 'lcao': raise NotImplementedError('LCAO mode does not support ' 'orbital-dependent XC functionals.') if mode == 'pw': mode = PW() if mode == 'fd' and par.usefractrans: raise NotImplementedError('FD mode does not support ' 'fractional translations.') if mode == 'lcao' and par.usefractrans: raise Warning('Fractional translations have not been tested ' 'with LCAO mode. Use with care!') if par.realspace is None: realspace = not isinstance(mode, PW) else: realspace = par.realspace if isinstance(mode, PW): assert not realspace if par.gpts is not None: N_c = np.array(par.gpts) else: h = par.h if h is not None: h /= Bohr N_c = get_number_of_grid_points(cell_cv, h, mode, realspace) if par.filter is None and not isinstance(mode, PW): gamma = 1.6 hmax = ((np.linalg.inv(cell_cv)**2).sum(0)**-0.5 / N_c).max() def filter(rgd, rcut, f_r, l=0): gcut = np.pi / hmax - 2 / rcut / gamma f_r[:] = rgd.filter(f_r, rcut * gamma, gcut, l) else: filter = par.filter setups = Setups(Z_a, par.setups, par.basis, par.lmax, xc, filter, world) if magmom_av.ndim == 1: collinear = True magmom_av, magmom_a = np.zeros((natoms, 3)), magmom_av magmom_av[:, 2] = magmom_a else: collinear = False magnetic = magmom_av.any() spinpol = par.spinpol if par.hund: if natoms != 1: raise ValueError('hund=True arg only valid for single atoms!') spinpol = True magmom_av[0] = (0, 0, setups[0].get_hunds_rule_moment(par.charge)) if spinpol is None: spinpol = magnetic elif magnetic and not spinpol: raise ValueError('Non-zero initial magnetic moment for a ' + 'spin-paired calculation!') if collinear: nspins = 1 + int(spinpol) ncomp = 1 else: nspins = 1 ncomp = 2 # K-point descriptor bzkpts_kc = kpts2ndarray(par.kpts, self.atoms) kd = KPointDescriptor(bzkpts_kc, nspins, collinear, par.usefractrans) width = par.width if width is None: if pbc_c.any(): width = 0.1 # eV else: width = 0.0 else: assert par.occupations is None if hasattr(self, 'time') or par.dtype == complex: dtype = complex else: if kd.gamma: dtype = float else: dtype = complex ## rbw: If usefractrans=True, kd.set_symmetry might overwrite N_c. ## This is necessary, because N_c must be dividable by 1/(fractional translation), ## f.e. fractional translations of a grid point must land on a grid point. N_c = kd.set_symmetry(atoms, setups, magmom_av, par.usesymm, N_c, world) nao = setups.nao nvalence = setups.nvalence - par.charge M_v = magmom_av.sum(0) M = np.dot(M_v, M_v)**0.5 nbands = par.nbands if nbands is None: nbands = 0 for setup in setups: nbands_from_atom = setup.get_default_nbands() # Any obscure setup errors? if nbands_from_atom < -(-setup.Nv // 2): raise ValueError('Bad setup: This setup requests %d' ' bands but has %d electrons.' % (nbands_from_atom, setup.Nv)) nbands += nbands_from_atom nbands = min(nao, nbands) elif nbands > nao and mode == 'lcao': raise ValueError('Too many bands for LCAO calculation: ' '%d bands and only %d atomic orbitals!' 
% (nbands, nao)) if nvalence < 0: raise ValueError( 'Charge %f is not possible - not enough valence electrons' % par.charge) if nbands <= 0: nbands = int(nvalence + M + 0.5) // 2 + (-nbands) if nvalence > 2 * nbands: raise ValueError('Too few bands! Electrons: %f, bands: %d' % (nvalence, nbands)) nbands *= ncomp if par.width is not None: self.text('**NOTE**: please start using ' 'occupations=FermiDirac(width).') if par.fixmom: self.text('**NOTE**: please start using ' 'occupations=FermiDirac(width, fixmagmom=True).') if self.occupations is None: if par.occupations is None: # Create object for occupation numbers: self.occupations = occupations.FermiDirac(width, par.fixmom) else: self.occupations = par.occupations self.occupations.magmom = M_v[2] cc = par.convergence if mode == 'lcao': niter_fixdensity = 0 else: niter_fixdensity = None if self.scf is None: self.scf = SCFLoop( cc['eigenstates'] / Hartree**2 * nvalence, cc['energy'] / Hartree * max(nvalence, 1), cc['density'] * nvalence, par.maxiter, par.fixdensity, niter_fixdensity) parsize_kpt = par.parallel['kpt'] parsize_domain = par.parallel['domain'] parsize_bands = par.parallel['band'] if not realspace: pbc_c = np.ones(3, bool) if not self.wfs: if parsize_domain == 'domain only': # XXX this was silly! parsize_domain = world.size parallelization = mpi.Parallelization(world, nspins * kd.nibzkpts) ndomains = None if parsize_domain is not None: ndomains = np.prod(parsize_domain) if isinstance(mode, PW): if ndomains > 1: raise ValueError('Planewave mode does not support ' 'domain decomposition.') ndomains = 1 parallelization.set(kpt=parsize_kpt, domain=ndomains, band=parsize_bands) domain_comm, kpt_comm, band_comm = \ parallelization.build_communicators() #domain_comm, kpt_comm, band_comm = mpi.distribute_cpus( # parsize_domain, parsize_bands, # nspins, kd.nibzkpts, world, par.idiotproof, mode) kd.set_communicator(kpt_comm) parstride_bands = par.parallel['stridebands'] # Unfortunately we need to remember that we adjusted the # number of bands so we can print a warning if it differs # from the number specified by the user. (The number can # be inferred from the input parameters, but it's tricky # because we allow negative numbers) self.nbands_parallelization_adjustment = -nbands % band_comm.size nbands += self.nbands_parallelization_adjustment # I would like to give the following error message, but apparently # there are cases, e.g. gpaw/test/gw_ppa.py, which involve # nbands > nao and are supposed to work that way. #if nbands > nao: # raise ValueError('Number of bands %d adjusted for band ' # 'parallelization %d exceeds number of atomic ' # 'orbitals %d. This problem can be fixed ' # 'by reducing the number of bands a bit.' # % (nbands, band_comm.size, nao)) bd = BandDescriptor(nbands, band_comm, parstride_bands) if (self.density is not None and self.density.gd.comm.size != domain_comm.size): # Domain decomposition has changed, so we need to # reinitialize density and hamiltonian: if par.fixdensity: raise RuntimeError('Density reinitialization conflict ' + 'with "fixdensity" - specify domain decomposition.') self.density = None self.hamiltonian = None # Construct grid descriptor for coarse grids for wave functions: gd = self.grid_descriptor_class(N_c, cell_cv, pbc_c, domain_comm, parsize_domain) # do k-point analysis here? 
XXX args = (gd, nvalence, setups, bd, dtype, world, kd, self.timer) if par.parallel['sl_auto']: # Choose scalapack parallelization automatically for key, val in par.parallel.items(): if (key.startswith('sl_') and key != 'sl_auto' and val is not None): raise ValueError("Cannot use 'sl_auto' together " "with '%s'" % key) max_scalapack_cpus = bd.comm.size * gd.comm.size nprow = max_scalapack_cpus npcol = 1 # Get a sort of reasonable number of columns/rows while npcol < nprow and nprow % 2 == 0: npcol *= 2 nprow //= 2 assert npcol * nprow == max_scalapack_cpus # ScaLAPACK creates trouble if there aren't at least a few # whole blocks; choose block size so there will always be # several blocks. This will crash for small test systems, # but so will ScaLAPACK in any case blocksize = min(-(-nbands // 4), 64) sl_default = (nprow, npcol, blocksize) else: sl_default = par.parallel['sl_default'] if mode == 'lcao': # Layouts used for general diagonalizer sl_lcao = par.parallel['sl_lcao'] if sl_lcao is None: sl_lcao = sl_default lcaoksl = get_KohnSham_layouts(sl_lcao, 'lcao', gd, bd, dtype, nao=nao, timer=self.timer) if collinear: self.wfs = LCAOWaveFunctions(lcaoksl, *args) else: from gpaw.xc.noncollinear import \ NonCollinearLCAOWaveFunctions self.wfs = NonCollinearLCAOWaveFunctions(lcaoksl, *args) elif mode == 'fd' or isinstance(mode, PW): # buffer_size keyword only relevant for fdpw buffer_size = par.parallel['buffer_size'] # Layouts used for diagonalizer sl_diagonalize = par.parallel['sl_diagonalize'] if sl_diagonalize is None: sl_diagonalize = sl_default diagksl = get_KohnSham_layouts(sl_diagonalize, 'fd', gd, bd, dtype, buffer_size=buffer_size, timer=self.timer) # Layouts used for orthonormalizer sl_inverse_cholesky = par.parallel['sl_inverse_cholesky'] if sl_inverse_cholesky is None: sl_inverse_cholesky = sl_default if sl_inverse_cholesky != sl_diagonalize: message = 'sl_inverse_cholesky != sl_diagonalize ' \ 'is not implemented.' 
raise NotImplementedError(message) orthoksl = get_KohnSham_layouts(sl_inverse_cholesky, 'fd', gd, bd, dtype, buffer_size=buffer_size, timer=self.timer) # Use (at most) all available LCAO for initialization lcaonbands = min(nbands, nao) try: lcaobd = BandDescriptor(lcaonbands, band_comm, parstride_bands) except RuntimeError: initksl = None else: # Layouts used for general diagonalizer # (LCAO initialization) sl_lcao = par.parallel['sl_lcao'] if sl_lcao is None: sl_lcao = sl_default initksl = get_KohnSham_layouts(sl_lcao, 'lcao', gd, lcaobd, dtype, nao=nao, timer=self.timer) if hasattr(self, 'time'): assert mode == 'fd' from gpaw.tddft import TimeDependentWaveFunctions self.wfs = TimeDependentWaveFunctions(par.stencils[0], diagksl, orthoksl, initksl, gd, nvalence, setups, bd, world, kd, self.timer) elif mode == 'fd': self.wfs = FDWaveFunctions(par.stencils[0], diagksl, orthoksl, initksl, *args) else: # Planewave basis: self.wfs = mode(diagksl, orthoksl, initksl, *args) else: self.wfs = mode(self, *args) else: self.wfs.set_setups(setups) if not self.wfs.eigensolver: # Number of bands to converge: nbands_converge = cc['bands'] if nbands_converge == 'all': nbands_converge = nbands elif nbands_converge != 'occupied': assert isinstance(nbands_converge, int) if nbands_converge < 0: nbands_converge += nbands eigensolver = get_eigensolver(par.eigensolver, mode, par.convergence) eigensolver.nbands_converge = nbands_converge # XXX Eigensolver class doesn't define an nbands_converge property if isinstance(xc, SIC): eigensolver.blocksize = 1 self.wfs.set_eigensolver(eigensolver) if self.density is None: gd = self.wfs.gd if par.stencils[1] != 9: # Construct grid descriptor for fine grids for densities # and potentials: finegd = gd.refine() else: # Special case (use only coarse grid): finegd = gd if realspace: self.density = RealSpaceDensity( gd, finegd, nspins, par.charge + setups.core_charge, collinear, par.stencils[1]) else: self.density = ReciprocalSpaceDensity( gd, finegd, nspins, par.charge + setups.core_charge, collinear) self.density.initialize(setups, self.timer, magmom_av, par.hund) self.density.set_mixer(par.mixer) if self.hamiltonian is None: gd, finegd = self.density.gd, self.density.finegd if realspace: self.hamiltonian = RealSpaceHamiltonian( gd, finegd, nspins, setups, self.timer, xc, par.external, collinear, par.poissonsolver, par.stencils[1], world) else: self.hamiltonian = ReciprocalSpaceHamiltonian( gd, finegd, self.density.pd2, self.density.pd3, nspins, setups, self.timer, xc, par.external, collinear, world) xc.initialize(self.density, self.hamiltonian, self.wfs, self.occupations) self.text() self.print_memory_estimate(self.txt, maxdepth=memory_estimate_depth) self.txt.flush() self.timer.print_info(self) if dry_run: self.dry_run() self.initialized = True def dry_run(self): # Can be overridden like in gpaw.atom.atompaw self.print_cell_and_parameters() self.print_positions() self.txt.flush() raise SystemExit def restore_state(self): """After restart, calculate fine density and poisson solution. These are not initialized by default. TODO: Is this really the most efficient way? """ spos_ac = self.atoms.get_scaled_positions() % 1.0 self.density.set_positions(spos_ac) self.density.interpolate_pseudo_density() self.density.calculate_pseudo_charge() self.hamiltonian.set_positions(spos_ac, self.wfs.rank_a) self.hamiltonian.update(self.density) def attach(self, function, n=1, *args, **kwargs): """Register observer function. 
Call *function* every *n* iterations using *args* and *kwargs* as arguments.""" try: slf = function.im_self except AttributeError: pass else: if slf is self: # function is a bound method of self. Store the name # of the method and avoid circular reference: function = function.im_func.func_name self.observers.append((function, n, args, kwargs)) def call_observers(self, iter, final=False): """Call all registered callback functions.""" for function, n, args, kwargs in self.observers: if ((iter % n) == 0) != final: if isinstance(function, str): function = getattr(self, function) function(*args, **kwargs) def get_reference_energy(self): return self.wfs.setups.Eref * Hartree def write(self, filename, mode='', cmr_params={}, **kwargs): """Write state to file. use mode='all' to write the wave functions. cmr_params is a dictionary that allows you to specify parameters for CMR (Computational Materials Repository). """ self.timer.start('IO') gpaw.io.write(self, filename, mode, cmr_params=cmr_params, **kwargs) self.timer.stop('IO') def get_myu(self, k, s): """Return my u corresponding to a certain kpoint and spin - or None""" # very slow, but we are sure that we have it for u in range(len(self.wfs.kpt_u)): if self.wfs.kpt_u[u].k == k and self.wfs.kpt_u[u].s == s: return u return None def get_homo_lumo(self): """Return HOMO and LUMO eigenvalues.""" return self.occupations.get_homo_lumo(self.wfs) * Hartree def estimate_memory(self, mem): """Estimate memory use of this object.""" for name, obj in [('Density', self.density), ('Hamiltonian', self.hamiltonian), ('Wavefunctions', self.wfs), ]: obj.estimate_memory(mem.subnode(name)) def print_memory_estimate(self, txt=None, maxdepth=-1): """Print estimated memory usage for PAW object and components. maxdepth is the maximum nesting level of displayed components. The PAW object must be initialize()'d, but needs not have large arrays allocated.""" # NOTE. This should work with --dry-run=N # # However, the initial overhead estimate is wrong if this method # is called within a real mpirun/gpaw-python context. if txt is None: txt = self.txt txt.write('Memory estimate\n') txt.write('---------------\n') mem_init = maxrss() # initial overhead includes part of Hamiltonian! txt.write('Process memory now: %.2f MiB\n' % (mem_init / 1024.0**2)) mem = MemNode('Calculator', 0) try: self.estimate_memory(mem) except AttributeError, m: txt.write('Attribute error: %r' % m) txt.write('Some object probably lacks estimate_memory() method') txt.write('Memory breakdown may be incomplete') mem.calculate_size() mem.write(txt, maxdepth=maxdepth) def converge_wave_functions(self): """Converge the wave-functions if not present.""" if not self.wfs or not self.scf: self.initialize() else: self.wfs.initialize_wave_functions_from_restart_file() spos_ac = self.atoms.get_scaled_positions() % 1.0 self.wfs.set_positions(spos_ac) no_wave_functions = (self.wfs.kpt_u[0].psit_nG is None) converged = self.scf.check_convergence(self.density, self.wfs.eigensolver) if no_wave_functions or not converged: self.wfs.eigensolver.error = np.inf self.scf.converged = False # is the density ok ? 
error = self.density.mixer.get_charge_sloshing() criterion = (self.input_parameters['convergence']['density'] * self.wfs.nvalence) if error < criterion and not self.hamiltonian.xc.orbital_dependent: self.scf.fix_density() self.calculate() def diagonalize_full_hamiltonian(self, nbands=None, scalapack=None): self.wfs.diagonalize_full_hamiltonian(self.hamiltonian, self.atoms, self.occupations, self.txt, nbands, scalapack) def check_atoms(self): """Check that atoms objects are identical on all processors.""" if not mpi.compare_atoms(self.atoms, comm=self.wfs.world): raise RuntimeError('Atoms objects on different processors ' + 'are not identical!') def kpts2sizeandoffsets(size=None, density=None, gamma=None, even=None, atoms=None): """Helper function for selecting k-points. Use either size or density. size: 3 ints Number of k-points. density: float K-point density in units of k-points per Ang^-1. gamma: None or bool Should the Gamma-point be included? Yes / no / don't care: True / False / None. even: None or bool Should the number of k-points be even? Yes / no / don't care: True / False / None. atoms: Atoms object Needed for calculating k-point density. """ if size is None: if density is None: size = [1, 1, 1] else: size = kptdensity2monkhorstpack(atoms, density, even) offsets = [0, 0, 0] if gamma is not None: for i, s in enumerate(size): if atoms.pbc[i] and s % 2 != bool(gamma): offsets[i] = 0.5 / s return size, offsets def kpts2ndarray(kpts, atoms=None): """Convert kpts keyword to 2-d ndarray of scaled k-points.""" if kpts is None: return np.zeros((1, 3)) if isinstance(kpts, dict): size, offsets = kpts2sizeandoffsets(atoms=atoms, **kpts) return monkhorst_pack(size) + offsets if isinstance(kpts[0], int): return monkhorst_pack(kpts) return np.array(kpts) oops = """ Did not converge! Here are some tips: 1) Make sure the geometry and spin-state is physically sound. 2) Use less aggressive density mixing. 3) Solve the eigenvalue problem more accurately at each scf-step. 4) Use a smoother distribution function for the occupation numbers. 5) Try adding more empty states. 6) Use enough k-points. 7) Don't let your structure optimization algorithm take too large steps. 8) Solve the Poisson equation more accurately. 9) Better initial guess for the wave functions. See details here: https://wiki.fysik.dtu.dk/gpaw/documentation/convergence.html """
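# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the original module).
# It exercises the k-point helpers defined above; the toy silicon cell is
# an assumption and any periodic Atoms object would do.
if __name__ == '__main__':
    from ase import Atoms
    si = Atoms('Si2', cell=3.8 * np.eye(3), pbc=True,
               scaled_positions=[(0, 0, 0), (0.25, 0.25, 0.25)])
    # Explicit 4x4x4 Monkhorst-Pack grid -> 64 scaled k-points:
    print(kpts2ndarray((4, 4, 4)).shape)
    # kpts=None falls back to a single Gamma point:
    print(kpts2ndarray(None))
    # Dictionary form: same grid, but shifted so that it contains Gamma.
    size, offsets = kpts2sizeandoffsets(size=(4, 4, 4), gamma=True, atoms=si)
    print('size = %s, offsets = %s' % (size, offsets))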
robwarm/gpaw-symm
gpaw/paw.py
Python
gpl-3.0
37964
[ "ASE", "GPAW" ]
e2c74a61b6f6b3acc748de0e622f5993c2888681dc4903d14d02ed955a57e906
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. from __future__ import division, unicode_literals """ Created on Mar 18, 2012 """ __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "0.1" __maintainer__ = "Shyue Ping Ong" __email__ = "shyue@mit.edu" __date__ = "Mar 18, 2012" import unittest import os from pymatgen.apps.borg.hive import VaspToComputedEntryDrone, \ SimpleVaspToComputedEntryDrone, GaussianToComputedEntryDrone from pymatgen.entries.computed_entries import ComputedStructureEntry from pymatgen.entries.compatibility import MITCompatibility class VaspToComputedEntryDroneTest(unittest.TestCase): def setUp(self): self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files') self.drone = VaspToComputedEntryDrone(data=["efermi"]) self.structure_drone = VaspToComputedEntryDrone(True) def test_get_valid_paths(self): for path in os.walk(self.test_dir): if path[0] == self.test_dir: self.assertTrue(len(self.drone.get_valid_paths(path)) > 0) def test_assimilate(self): entry = self.drone.assimilate(self.test_dir) for p in ["hubbards", "is_hubbard", "potcar_spec", "run_type"]: self.assertIn(p, entry.parameters) self.assertAlmostEqual(entry.data["efermi"], -6.62148548) self.assertEqual(entry.composition.reduced_formula, "Xe") self.assertAlmostEqual(entry.energy, 0.5559329) entry = self.structure_drone.assimilate(self.test_dir) self.assertEqual(entry.composition.reduced_formula, "Xe") self.assertAlmostEqual(entry.energy, 0.5559329) self.assertIsInstance(entry, ComputedStructureEntry) self.assertIsNotNone(entry.structure) self.assertEqual(len(entry.parameters["history"]), 2) def test_to_from_dict(self): d = self.structure_drone.as_dict() drone = VaspToComputedEntryDrone.from_dict(d) self.assertEqual(type(drone), VaspToComputedEntryDrone) class SimpleVaspToComputedEntryDroneTest(unittest.TestCase): def setUp(self): self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files') self.drone = SimpleVaspToComputedEntryDrone() self.structure_drone = SimpleVaspToComputedEntryDrone(True) def test_get_valid_paths(self): for path in os.walk(self.test_dir): if path[0] == self.test_dir: self.assertTrue(len(self.drone.get_valid_paths(path)) > 0) def test_to_from_dict(self): d = self.structure_drone.as_dict() drone = SimpleVaspToComputedEntryDrone.from_dict(d) self.assertEqual(type(drone), SimpleVaspToComputedEntryDrone) class GaussianToComputedEntryDroneTest(unittest.TestCase): def setUp(self): self.test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", 'test_files', "molecules") self.drone = GaussianToComputedEntryDrone(data=["corrections"]) self.structure_drone = GaussianToComputedEntryDrone(True) def test_get_valid_paths(self): for path in os.walk(self.test_dir): if path[0] == self.test_dir: self.assertTrue(len(self.drone.get_valid_paths(path)) > 0) def test_assimilate(self): test_file = os.path.join(self.test_dir, "methane.log") entry = self.drone.assimilate(test_file) for p in ["functional", "basis_set", "charge", "spin_mult", 'route']: self.assertIn(p, entry.parameters) for p in ["corrections"]: self.assertIn(p, entry.data) self.assertEqual(entry.composition.reduced_formula, "H4C") self.assertAlmostEqual(entry.energy, -39.9768775602) entry = self.structure_drone.assimilate(test_file) self.assertEqual(entry.composition.reduced_formula, "H4C") self.assertAlmostEqual(entry.energy, -39.9768775602) 
self.assertIsInstance(entry, ComputedStructureEntry) self.assertIsNotNone(entry.structure) for p in ["properly_terminated", "stationary_type"]: self.assertIn(p, entry.data) def test_to_from_dict(self): d = self.structure_drone.as_dict() drone = GaussianToComputedEntryDrone.from_dict(d) self.assertEqual(type(drone), GaussianToComputedEntryDrone) if __name__ == "__main__": #import sys;sys.argv = ['', 'Test.testName'] unittest.main()
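# --------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the original tests).
# It strings together the drone calls exercised above; ``root_dir`` is an
# assumption and should point at a directory tree of completed VASP runs.
def _example_harvest_vasp_runs(root_dir):
    """Illustrative only: walk a directory tree and assimilate every valid
    VASP run into a computed entry, mirroring what the tests above check
    one directory at a time."""
    drone = VaspToComputedEntryDrone(data=["efermi"])
    entries = []
    for path in os.walk(root_dir):
        for valid_path in drone.get_valid_paths(path):
            entry = drone.assimilate(valid_path)
            if entry is not None:
                entries.append(entry)
    return entries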
matk86/pymatgen
pymatgen/apps/borg/tests/test_hive.py
Python
mit
4645
[ "pymatgen" ]
9b7408593fa7edac95d113d391ecf51c5f4b9fcdacecc3f8ecabb2374e99833c
# # -*- coding: utf-8 -*- # # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2006 Donald N. Allingham # Copyright (C) 2007-2009 Brian G. Matherly # Copyright (C) 2008 Raphael Ackermann # 2002-2003 Donald A. Peterson # 2003 Alex Roitman # 2009 Benny Malengier # 2010 Peter Landgren # Copyright (C) 2011 Adam Stein <adam@csh.rit.edu> # 2011-2012 Harald Rosemann # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ """LaTeX document generator""" #------------------------------------------------------------------------ # # python modules # #------------------------------------------------------------------------ from gramps.gen.ggettext import gettext as _ from bisect import bisect import re, os, sys #----------------------------------------------------------------------- - # # gramps modules # #------------------------------------------------------------------------ from gramps.gen.plug.docgen import BaseDoc, TextDoc, PAPER_LANDSCAPE, FONT_SANS_SERIF, URL_PATTERN from gramps.gen.plug.docbackend import DocBackend import Image _CLICKABLE = r'''\url{\1}''' #------------------------------------------------------------------------ # # Special settings for LaTeX output # #------------------------------------------------------------------------ # For an interim mark e.g. for an intended linebreak I use a special pattern. # It shouldn't interfere with normal text. In LaTeX charackter '&' is used # for column separation in tables and may occur there in series. The pattern # is used here before column separation is set. On the other hand incoming # text can't show this pattern for it would have been replaced by '\&\&'. # So the choosen pattern will do the job without confusion: SEPARATION_PAT = '&&' #------------------------------------------------------------------------ # # Latex Article Template # #------------------------------------------------------------------------ _LATEX_TEMPLATE_1 = '\\documentclass[%s]{article}\n' _LATEX_TEMPLATE = '''% % \\usepackage[T1]{fontenc}% % % We use latin1 encoding at a minimum by default. % GRAMPS uses unicode UTF-8 encoding for its % international support. LaTeX can deal gracefully % with unicode encoding by using the ucs style invoked % when utf8 is specified as an option to the inputenc % package. This package is included by default in some % installations, but not in others, so we do not make it % the default. 
Uncomment the second line if you wish to use it % (If you do not have ucs.sty, you may obtain it from % http://www.tug.org/tex-archive/macros/latex/contrib/supported/unicode/) % \\usepackage[latin1]{inputenc}% %\\usepackage[latin1,utf8]{inputenc}% \\usepackage{graphicx}% Extended graphics support \\usepackage{longtable}% For multi-page tables \\usepackage{calc}% For some calculations \\usepackage{ifthen}% For table width calculations \\usepackage{ragged2e}% For left aligning with hyphenation \\usepackage{wrapfig}% wrap pictures in text % % Depending on your LaTeX installation, the margins may be too % narrow. This can be corrected by uncommenting the following % two lines and adjusting the width appropriately. The example % removes 0.5in from each margin. (Adds 1 inch to the text) %\\addtolength{\\oddsidemargin}{-0.5in}% %\\addtolength{\\textwidth}{1.0in}% % % Vertical spacing between paragraphs: % take one of three possibilities or modify to your taste: %\\setlength{\\parskip}{1.0ex plus0.2ex minus0.2ex}% \\setlength{\\parskip}{1.5ex plus0.3ex minus0.3ex}% %\\setlength{\\parskip}{2.0ex plus0.4ex minus0.4ex}% % % Vertical spacing between lines: % take one of three possibilities or modify to your taste: \\renewcommand{\\baselinestretch}{1.0}% %\\renewcommand{\\baselinestretch}{1.1}% %\\renewcommand{\\baselinestretch}{1.2}% % % Indentation; substitute for '1cm' of gramps, 2.5em is right for 12pt % take one of three possibilities or modify to your taste: \\newlength{\\grbaseindent}% %\\setlength{\\grbaseindent}{3.0em}% \\setlength{\\grbaseindent}{2.5em}% %\\setlength{\\grbaseindent}{2.0em}% % % % ------------------------------------------------------------- % New lengths, counters and commands for calculations in tables % ------------------------------------------------------------- % \\newlength{\\grtabwidth}% \\newlength{\\grtabprepos}% \\newlength{\\grreqwidth}% \\newlength{\\grtempwd}% \\newlength{\\grmaxwidth}% \\newlength{\\grprorated}% \\newlength{\\grxwd}% \\newlength{\\grwidthused}% \\newlength{\\grreduce}% \\newlength{\\grcurcolend}% \\newlength{\\grspanwidth}% \\newlength{\\grleadlabelwidth}% \\newlength{\\grminpgindent}% \\newlength{\\grlistbacksp}% \\newlength{\\grpictsize}% \\newlength{\\grmaxpictsize}% \\newlength{\\grtextsize}% \\newlength{\\grmaxtextsize}% \\newcounter{grtofixcnt}% \\newcounter{grxwdcolcnt}% % % \\newcommand{\\grinitlength}[2]{% \\ifthenelse{\\isundefined{#1}}% {\\newlength{#1}}{}% \\setlength{#1}{#2}% }% % \\newcommand{\\grinittab}[2]{% #1: tabwidth, #2 = 1.0/anz-cols \\setlength{\\grtabwidth}{#1}% \\setlength{\\grprorated}{#2\\grtabwidth}% \\setlength{\\grwidthused}{0em}% \\setlength{\\grreqwidth}{0em}% \\setlength{\\grmaxwidth }{0em}% \\setlength{\\grxwd}{0em}% \\setlength{\\grtempwd}{0em}% \\setlength{\\grpictsize}{0em}% \\setlength{\\grmaxpictsize}{0em}% \\setlength{\\grtextsize}{0em}% \\setlength{\\grmaxtextsize}{0em}% \\setlength{\\grcurcolend}{0em}% \\setcounter{grxwdcolcnt}{0}% \\setcounter{grtofixcnt}{0}% number of wide cols% \\grinitlength{\\grcolbega}{0em}% beg of first col }% % \\newcommand{\\grmaxvaltofirst}[2]{% \\ifthenelse{\\lengthtest{#1 < #2}}% {\\setlength{#1}{#2}}{}% }% % \\newcommand{\\grsetreqfull}{% \\grmaxvaltofirst{\\grmaxpictsize}{\\grpictsize}% \\grmaxvaltofirst{\\grmaxtextsize}{\\grtextsize}% }% % \\newcommand{\\grsetreqpart}[1]{% \\addtolength{\\grtextsize}{#1 - \\grcurcolend}% \\addtolength{\\grpictsize}{#1 - \\grcurcolend}% \\grsetreqfull% }% % \\newcommand{\\grdividelength}{% \\setlength{\\grtempwd}{\\grtabwidth - 
\\grwidthused}% % rough division of lengths: % if 0 < #1 <= 10: \\grxwd = ~\\grtempwd / grtofixcnt % otherwise: \\grxwd = \\grprorated \\ifthenelse{\\value{grtofixcnt} > 0}% {\\ifthenelse{\\value{grtofixcnt}=1}% {\\setlength{\\grxwd}{\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=2} {\\setlength{\\grxwd}{0.5\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=3} {\\setlength{\\grxwd}{0.333\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=4} {\\setlength{\\grxwd}{0.25\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=5} {\\setlength{\\grxwd}{0.2\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=6} {\\setlength{\\grxwd}{0.166\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=7} {\\setlength{\\grxwd}{0.143\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=8} {\\setlength{\\grxwd}{0.125\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=9} {\\setlength{\\grxwd}{0.111\\grtempwd}}{% \\ifthenelse{\\value{grtofixcnt}=10} {\\setlength{\\grxwd}{0.1\\grtempwd}}{% \\setlength{\\grxwd}{\\grprorated}% give up, take \\grprorated% }}}}}}}}}}% \\setlength{\\grreduce}{0em}% }{\\setlength{\\grxwd}{0em}}% }% % \\newcommand{\\grtextneedwidth}[1]{% \\settowidth{\\grtempwd}{#1}% \\grmaxvaltofirst{\\grtextsize}{\\grtempwd}% }% % \\newcommand{\\grcolsfirstfix}[5]{% \\grinitlength{#1}{\\grcurcolend}% \\grinitlength{#3}{0em}% \\grinitlength{#4}{\\grmaxpictsize}% \\grinitlength{#5}{\\grmaxtextsize}% \\grinitlength{#2}{#5}% \\grmaxvaltofirst{#2}{#4}% \\addtolength{#2}{2\\tabcolsep}% \\grmaxvaltofirst{\\grmaxwidth}{#2}% \\ifthenelse{\\lengthtest{#2 < #4} \\or \\lengthtest{#2 < \\grprorated}}% { \\setlength{#3}{#2}% \\addtolength{\\grwidthused}{#2} }% { \\stepcounter{grtofixcnt} }% \\addtolength{\\grcurcolend}{#2}% }% % \\newcommand{\\grcolssecondfix}[4]{% \\ifthenelse{\\lengthtest{\\grcurcolend < \\grtabwidth}}% { \\setlength{#3}{#2} }% { \\addtolength{#1}{-\\grreduce}% \\ifthenelse{\\lengthtest{#2 = \\grmaxwidth}}% { \\stepcounter{grxwdcolcnt}}% { \\ifthenelse{\\lengthtest{#3 = 0em} \\and % \\lengthtest{#4 > 0em}}% { \\setlength{\\grtempwd}{#4}% \\grmaxvaltofirst{\\grtempwd}{\\grxwd}% \\addtolength{\\grreduce}{#2 - \\grtempwd}% \\setlength{#2}{\\grtempwd}% \\addtolength{\\grwidthused}{#2}% \\addtocounter{grtofixcnt}{-1}% \\setlength{#3}{#2}% }{}% }% }% }% % \\newcommand{\\grcolsthirdfix}[3]{% \\ifthenelse{\\lengthtest{\\grcurcolend < \\grtabwidth}}% {}{ \\addtolength{#1}{-\\grreduce}% \\ifthenelse{\\lengthtest{#3 = 0em} \\and % \\lengthtest{#2 < \\grmaxwidth}}% { \\ifthenelse{\\lengthtest{#2 < 0.5\\grmaxwidth}}% { \\setlength{\\grtempwd}{0.5\\grxwd}% \\grmaxvaltofirst{\\grtempwd}{0.7\\grprorated}}% { \\setlength{\\grtempwd}{\\grxwd}}% \\addtolength{\\grreduce}{#2 - \\grtempwd}% \\setlength{#2}{\\grtempwd}% \\addtolength{\\grwidthused}{#2}% \\addtocounter{grtofixcnt}{-1}% \\setlength{#3}{#2}% }{}% }% }% % \\newcommand{\\grcolsfourthfix}[3]{% \\ifthenelse{\\lengthtest{\\grcurcolend < \\grtabwidth}}% {}{ \\addtolength{#1}{-\\grreduce}% \\ifthenelse{\\lengthtest{#3 = 0em}}% { \\addtolength{\\grreduce}{#2 - \\grxwd}% \\setlength{#2}{\\grxwd}% \\setlength{#3}{#2}% }{}% }% }% % \\newcommand{\\grgetspanwidth}[4]{% \\grinitlength{#1}{#3 - #2 + #4}% }% % \\newcommand{\\tabheadstrutceil}{% \\rule[0.0ex]{0.00em}{3.5ex}}% \\newcommand{\\tabheadstrutfloor}{% \\rule[-2.0ex]{0.00em}{2.5ex}}% \\newcommand{\\tabrowstrutceil}{% \\rule[0.0ex]{0.00em}{2.9ex}}% \\newcommand{\\tabrowstrutfloor}{% \\rule[-0.1ex]{0.00em}{2.0ex}}% % \\newcommand{\\grempty}[1]{}% % \\newcommand{\\graddvdots}[1]{% \\hspace*{\\fill}\\hspace*{\\fill}\\raisebox{#1}{\\vdots}% }% % 
\\newcommand{\\grtabpgbreak}[4]{% #1 { \\parbox[t]{ #2 - 2\\tabcolsep}{\\tabheadstrutceil\\hspace*{\\fill}% \\raisebox{#4}{\\vdots} #3{#4} \\hspace*{\\fill}\\tabheadstrutfloor}}% }% % \\newcommand{\\grcolpart}[3]{% #1 { \\parbox[t]{ #2 - 2\\tabcolsep}% {\\tabrowstrutceil #3~\\\\[-1.6ex]\\tabrowstrutfloor}}% }% % \\newcommand{\\grminpghead}[2]{% \\setlength{\\grminpgindent}{#1\\grbaseindent-\\grlistbacksp}% \\hspace*{\\grminpgindent}% \\ifthenelse{\\not \\lengthtest{#2em > 0em}}% {\\begin{minipage}[t]{\\textwidth -\\grminpgindent}}% {\\begin{minipage}[t]{\\textwidth -\\grminpgindent% -#2\\grbaseindent -4\\tabcolsep}}% }% % \\newcommand{\\grminpgtail}{% \\end{minipage}\\parindent0em% }% % \\newcommand{\\grlisthead}[1]{% \\begin{list}{#1}% { \\setlength{\\labelsep}{0.5em}% \\setlength{\\labelwidth}{\\grleadlabelwidth}% \\setlength{\\leftmargin}{\\grlistbacksp}% }\\item% }% % \\newcommand{\\grlisttail}{% \\end{list}% }% % \\newcommand{\\grprepleader}[1]{% \\settowidth{\\grtempwd}{#1}% \\ifthenelse{\\lengthtest{\\grtempwd > \\grleadlabelwidth}}% { \\setlength{\\grleadlabelwidth}{\\grtempwd}}{}% \\setlength{\\grlistbacksp}{\\grleadlabelwidth + 1.0em}% }% % \\newcommand{\\grprepnoleader}{% \\setlength{\\grleadlabelwidth}{0em}% \\setlength{\\grlistbacksp}{0em}% }% % \\newcommand{\\grmkpicture}[4]{% \\begin{wrapfigure}{r}{#2\\grbaseindent}% \\vspace{-6ex}% \\begin{center}% \\includegraphics[% width= #2\\grbaseindent,% height= #3\\grbaseindent,% keepaspectratio]% {#1}\\\\% {\\RaggedRight\\footnotesize#4}% \\end{center}% \\end{wrapfigure}% \\settowidth{\\grtempwd}{\\footnotesize#4}% \\setlength{\\grxwd}{#2\\grbaseindent}% \\ifthenelse{\\lengthtest{\\grtempwd < 0.7\\grxwd}}% {\\setlength{\\grxwd}{1ex}}{% \\ifthenelse{\\lengthtest{\\grtempwd < 1.2\\grxwd}}% {\\setlength{\\grxwd}{2ex}}{% \\ifthenelse{\\lengthtest{\\grtempwd < 1.8\\grxwd}}% {\\setlength{\\grxwd}{6ex}}{% \\ifthenelse{\\lengthtest{\\grtempwd < 2.0\\grxwd}}% {\\setlength{\\grxwd}{10ex}}{% \\setlength{\\grxwd}{12ex}}% }}}% \\setlength{\\grtempwd}{#3\\grbaseindent + \\grxwd}% \\rule[-\\grtempwd]{0pt}{\\grtempwd}% \\setlength{\\grtabprepos}{-\\grtempwd}% }% % % \\begin{document}% ''' #------------------------------------------------------------------------ # # Font size table and function # #------------------------------------------------------------------------ # These tables correlate font sizes to Latex. The first table contains # typical font sizes in points. The second table contains the standard # Latex font size names. Since we use bisect to map the first table to the # second, we are guaranteed that any font less than 6 points is 'tiny', fonts # from 6-7 points are 'script', etc. and fonts greater than or equal to 22 # are considered 'Huge'. Note that fonts from 12-13 points are not given a # Latex font size name but are considered "normal." 
_FONT_SIZES = [6, 8, 10, 12, 14, 16, 18, 20, 22] _FONT_NAMES = ['tiny', 'scriptsize', 'footnotesize', 'small', '', 'large', 'Large', 'LARGE', 'huge', 'Huge'] def map_font_size(fontsize): """ Map font size in points to Latex font size """ return _FONT_NAMES[bisect(_FONT_SIZES, fontsize)] #------------------------------------------------------------------------ # # auxiliaries to facilitate table construction # #------------------------------------------------------------------------ # patterns for regular expressions, module re: TBLFMT_PAT = re.compile(r'({\|?)l(\|?})') # constants for routing in table construction: (CELL_BEG, CELL_TEXT, CELL_END, ROW_BEG, ROW_END, TAB_BEG, TAB_END) = range(7) FIRST_ROW, SUBSEQ_ROW = range(2) def get_charform(col_num): """ Transfer column number to column charakter, limited to letters within a-z; 26, there is no need for more. early test of column count in start_table() """ if col_num > ord('z') - ord('a'): raise ValueError, ''.join(( '\n number of table columns is ', repr(col_num), '\n should be <= ', repr(ord('z') - ord('a')))) return chr(ord('a') + col_num) def get_numform(col_char): return ord(col_char) - ord('a') #------------------------------------------ # row_alph_counter = str_incr(MULTCOL_COUNT_BASE) # # 'aaa' is sufficient for up to 17576 multicolumns in each table; # do you need more? # uncomment one of the two lines MULTCOL_COUNT_BASE = 'aaa' # MULTCOL_COUNT_BASE = 'aaaa' #------------------------------------------ def str_incr(str_counter): """ for counting table rows """ lili = list(str_counter) while 1: yield ''.join(lili) if ''.join(lili) == len(lili)*'z': raise ValueError, ''.join(( '\n can\'t increment string ', ''.join(lili), ' of length ', str(len(lili)))) for i in range(len(lili)-1, -1, -1): if lili[i] < 'z': lili[i] = chr(ord(lili[i])+1) break else: lili[i] = 'a' #------------------------------------------------------------------------ # # Structure of Table-Memory # #------------------------------------------------------------------------ class Tab_Cell(): def __init__(self, colchar, span, head, content): self.colchar = colchar self.span = span self.head = head self.content = content class Tab_Row(): def __init__(self): self.cells =[] self.tail = '' self.addit = '' # for: \\hline, \\cline{} class Tab_Mem(): def __init__(self, head): self.head = head self.tail ='' self.rows =[] #------------------------------------------------------------------------ # # Functions for docbackend # #------------------------------------------------------------------------ def latexescape(text): """ change text in text that latex shows correctly special characters: \& \$ \% \# \_ \{ \} """ text = text.replace('&','\\&') text = text.replace('$','\\$') text = text.replace('%','\\%') text = text.replace('#','\\#') text = text.replace('_','\\_') text = text.replace('{','\\{') text = text.replace('}','\\}') # replace character unknown to LaTeX text = text.replace('→','$\\longrightarrow$') return text def latexescapeverbatim(text): """ change text in text that latex shows correctly respecting whitespace special characters: \& \$ \% \# \_ \{ \} Now also make sure space and newline is respected """ text = text.replace('&', '\\&') text = text.replace('$', '\\$') text = text.replace('%', '\\%') text = text.replace('#', '\\#') text = text.replace('_', '\\_') text = text.replace('{', '\\{') text = text.replace('}', '\\}') text = text.replace(' ', '\\ ') text = text.replace('\n', '~\\newline \n') #spaces at begin are normally ignored, make sure they are not. 
#due to above a space at begin is now \newline\n\ text = text.replace('\\newline\n\\ ', '\\newline\n\\hspace*{0.1\\grbaseindent}\\ ') # replace character unknown to LaTeX text = text.replace('→','$\\longrightarrow$') return text #------------------------------------------------------------------------ # # Document Backend class for cairo docs # #------------------------------------------------------------------------ class LateXBackend(DocBackend): """ Implementation of docbackend for latex docs. File and File format management for latex docs """ # overwrite base class attributes, they become static var of LaTeXDoc SUPPORTED_MARKUP = [ DocBackend.BOLD, DocBackend.ITALIC, DocBackend.UNDERLINE, DocBackend.FONTSIZE, DocBackend.FONTFACE, DocBackend.SUPERSCRIPT ] STYLETAG_MARKUP = { DocBackend.BOLD : ("\\textbf{", "}"), DocBackend.ITALIC : ("\\textit{", "}"), DocBackend.UNDERLINE : ("\\underline{", "}"), DocBackend.SUPERSCRIPT : ("\\textsuperscript{", "}"), } ESCAPE_FUNC = lambda x: latexescape def setescape(self, preformatted=False): """ Latex needs two different escape functions depending on the type. This function allows to switch the escape function """ if not preformatted: LateXBackend.ESCAPE_FUNC = lambda x: latexescape else: LateXBackend.ESCAPE_FUNC = lambda x: latexescapeverbatim def _create_xmltag(self, type, value): """ overwrites the method in DocBackend. creates the latex tags needed for non bool style types we support: FONTSIZE : use different \large denomination based on size : very basic, in mono in the font face then we use {\ttfamily } """ if type not in self.SUPPORTED_MARKUP: return None elif type == DocBackend.FONTSIZE: #translate size in point to something LaTeX can work with fontsize = map_font_size(value) if fontsize: return ("{\\" + fontsize + ' ', "}") else: return ("", "") elif type == DocBackend.FONTFACE: if 'MONO' in value.upper(): return ("{\\ttfamily ", "}") elif 'ROMAN' in value.upper(): return ("{\\rmfamily ", "}") return None def _checkfilename(self): """ Check to make sure filename satisfies the standards for this filetype """ if not self._filename.endswith(".tex"): self._filename = self._filename + ".tex" #------------------------------------------------------------------------ # # Paragraph Handling # #------------------------------------------------------------------------ class TexFont(object): def __init__(self, style=None): if style: self.font_beg = style.font_beg self.font_end = style.font_end self.leftIndent = style.left_indent self.firstLineIndent = style.firstLineIndent else: self.font_beg = "" self.font_end = "" self.leftIndent = "" self.firstLineIndent = "" #------------------------------------------------------------------ # # LaTeXDoc # #------------------------------------------------------------------ class LaTeXDoc(BaseDoc, TextDoc): """LaTeX document interface class. Derived from BaseDoc""" # --------------------------------------------------------------- # some additional variables # --------------------------------------------------------------- in_table = False in_multrow_cell = False # for tab-strukt: cols of rows pict = '' pict_in_table = False pict_width = 0 pict_height = 0 textmem = [] in_title = True # --------------------------------------------------------------- # begin of table special treatment # --------------------------------------------------------------- def emit(self, text, tab_state=CELL_TEXT, span=1): """ Hand over all text but tables to self._backend.write(), (line 1-2). In case of tables pass to specal treatment below. 
""" if not self.in_table: # all stuff but table self._backend.write(text) else: self.handle_table(text, tab_state, span) def handle_table(self, text, tab_state, span): """ Collect tables elements in an adequate cell/row/table structure and call for LaTeX width calculations and writing out """ if tab_state == CELL_BEG: # here text is head self.textmem = [] self.curcol_char = get_charform(self.curcol-1) if span > 1: # phantom columns prior to multicolumns for col in range(self.curcol - span, self.curcol - 1): col_char = get_charform(col) phantom = Tab_Cell(col_char, 0, '', '') self.tabrow.cells.append(phantom) self.tabcell = Tab_Cell(self.curcol_char, span, text, '') elif tab_state == CELL_TEXT: self.textmem.append(text) elif tab_state == CELL_END: # text == '' self.tabcell.content = ''.join(self.textmem).strip() if self.tabcell.content.find('\\centering') != -1: self.tabcell.content = self.tabcell.content.replace( '\\centering', '') self.tabcell.head = re.sub( TBLFMT_PAT, '\\1c\\2', self.tabcell.head) self.tabrow.cells.append(self.tabcell) self.textmem = [] elif tab_state == ROW_BEG: self.tabrow = Tab_Row() elif tab_state == ROW_END: self.tabrow.addit = text # text: \\hline, \\cline{} self.tabrow.tail = ''.join(self.textmem) # \\\\ row-termination if self.in_multrow_cell: # cols of rows: convert to rows of cols self.repack_row() else: self.tabmem.rows.append(self.tabrow) elif tab_state == TAB_BEG: # text: \\begin{longtable}[l]{ self._backend.write(''.join(('\\grinittab{\\textwidth}{', repr(1.0/self.numcols), '}%\n'))) self.tabmem = Tab_Mem(text) elif tab_state == TAB_END: # text: \\end{longtable} self.tabmem.tail = text # table completed, calc widths and write out self.calc_latex_widths() self.write_table() def repack_row(self): """ Transpose contents contained in a row of cols of cells to rows of cells with corresponding contents. Cols of the mult-row-cell are ended by SEPARATION_PAT """ # if last col empty: delete if self.tabrow.cells[-1].content == '': del self.tabrow.cells[-1] self.numcols -= 1 # extract cell.contents bare_contents = [cell.content.strip(SEPARATION_PAT).replace( '\n', '').split(SEPARATION_PAT) for cell in self.tabrow.cells] # mk equal length & transpose num_new_rows = max([len(mult_row_cont) for mult_row_cont in bare_contents]) cols_equ_len = [] for mrc in bare_contents: for i in range(num_new_rows - len(mrc)): mrc.append('') cols_equ_len.append(mrc) transp_cont = zip(*cols_equ_len) # picts? extract first_cell, last_cell = (0, self.numcols) if self.pict_in_table: if transp_cont[0][-1].startswith('\\grmkpicture'): self.pict = transp_cont[0][-1] last_cell -= 1 self.numcols -= 1 self._backend.write(''.join(('\\addtolength{\\grtabwidth}{-', repr(self.pict_width), '\\grbaseindent -2\\tabcolsep}%\n'))) self.pict_in_table = False # new row-col structure for row in range(num_new_rows): new_row = Tab_Row() for i in range(first_cell, last_cell): new_cell = Tab_Cell(get_charform(i + first_cell), self.tabrow.cells[i].span, self.tabrow.cells[i].head, transp_cont[row][i + first_cell]) new_row.cells.append(new_cell) new_row.tail = self.tabrow.tail new_row.addit = '' self.tabmem.rows.append(new_row) self.tabmem.rows[-1].addit = self.tabrow.addit self.in_multrow_cell = False return def calc_latex_widths(self): """ Control width settings in latex table construction Evaluations are set up here and passed to LaTeX to calculate required and to fix suitable widths. ??? Can all this be done exclusively in TeX? Don't know how. 
""" tabcol_chars = [] for col_num in range(self.numcols): col_char = get_charform(col_num) tabcol_chars.append(col_char) for row in self.tabmem.rows: cell = row.cells[col_num] if cell.span == 0: continue if cell.content.startswith('\\grmkpicture'): self._backend.write(''.join(('\\setlength{\\grpictsize}{', self.pict_width, '\\grbaseindent}%\n'))) else: for part in cell.content.split(SEPARATION_PAT): self._backend.write(''.join(('\\grtextneedwidth{', part, '}%\n'))) row.cells[col_num].content = cell.content.replace( SEPARATION_PAT, '~\\newline \n') if cell.span == 1: self._backend.write(''.join(('\\grsetreqfull%\n'))) elif cell.span > 1: self._backend.write(''.join(('\\grsetreqpart{\\grcolbeg', get_charform(get_numform(cell.colchar) - cell.span +1), '}%\n'))) self._backend.write(''.join(('\\grcolsfirstfix', ' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char, '}{\\grfinalwidth', col_char, '}{\\grpictreq', col_char, '}{\\grtextreq', col_char, '}%\n'))) self._backend.write(''.join(('\\grdividelength%\n'))) for col_char in tabcol_chars: self._backend.write(''.join(('\\grcolssecondfix', ' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char, '}{\\grfinalwidth', col_char, '}{\\grpictreq', col_char, '}%\n'))) self._backend.write(''.join(('\\grdividelength%\n'))) for col_char in tabcol_chars: self._backend.write(''.join(('\\grcolsthirdfix', ' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char, '}{\\grfinalwidth', col_char, '}%\n'))) self._backend.write(''.join(('\\grdividelength%\n'))) for col_char in tabcol_chars: self._backend.write(''.join(('\\grcolsfourthfix', ' {\\grcolbeg', col_char, '}{\\grtempwidth', col_char, '}{\\grfinalwidth', col_char, '}%\n'))) self.multcol_alph_counter = str_incr(MULTCOL_COUNT_BASE) for row in self.tabmem.rows: for cell in row.cells: if cell.span > 1: multcol_alph_id = self.multcol_alph_counter.next() self._backend.write(''.join(('\\grgetspanwidth{', '\\grspanwidth', multcol_alph_id, '}{\\grcolbeg', get_charform(get_numform(cell.colchar)- cell.span + 1), '}{\\grcolbeg', cell.colchar, '}{\\grtempwidth', cell.colchar, '}%\n'))) def write_table(self): # Choosing RaggedRight (with hyphenation) in table and # provide manually adjusting of column widths self._backend.write(''.join(( '%\n', self.pict, '%\n%\n', '% ==> Comment out one of the two lines ', 'by a leading "%" (first position)\n', '{ \\RaggedRight% left align with hyphenation in table \n', '%{% no left align in table \n%\n', '% ==> You may add pos or neg values ', 'to the following ', repr(self.numcols), ' column widths %\n'))) for col_num in range(self.numcols): self._backend.write(''.join(('\\addtolength{\\grtempwidth', get_charform(col_num), '}{+0.0cm}%\n'))) self._backend.write('% === %\n') # adjust & open table': if self.pict: self._backend.write(''.join(('%\n\\vspace{\\grtabprepos}%\n', '\\setlength{\\grtabprepos}{0ex}%\n'))) self.pict = '' self._backend.write(''.join(self.tabmem.head)) # special treatment at begin of longtable for heading and # closing at top and bottom of table # and parts of it at pagebreak separating self.multcol_alph_counter = str_incr(MULTCOL_COUNT_BASE) splitting_row = self.mk_splitting_row(self.tabmem.rows[FIRST_ROW]) self.multcol_alph_counter = str_incr(MULTCOL_COUNT_BASE) complete_row = self.mk_complete_row(self.tabmem.rows[FIRST_ROW]) self._backend.write(splitting_row) self._backend.write('\\endhead%\n') self._backend.write(splitting_row.replace('{+2ex}', '{-2ex}')) self._backend.write('\\endfoot%\n') if self.head_line: self._backend.write('\\hline%\n') self.head_line= False 
else: self._backend.write('%\n') self._backend.write(complete_row) self._backend.write('\\endfirsthead%\n') self._backend.write('\\endlastfoot%\n') # hand over subsequent rows for row in self.tabmem.rows[SUBSEQ_ROW:]: self._backend.write(self.mk_complete_row(row)) # close table by '\\end{longtable}', end '{\\RaggedRight' or '{' by '}' self._backend.write(''.join((''.join(self.tabmem.tail), '}%\n\n'))) def mk_splitting_row(self, row): splitting =[] add_vdots = '\\grempty' for cell in row.cells: if cell.span == 0: continue if (not splitting and get_numform(cell.colchar) == self.numcols - 1): add_vdots = '\\graddvdots' if cell.span == 1: cell_width = ''.join(('\\grtempwidth', cell.colchar)) else: cell_width = ''.join(('\\grspanwidth', self.multcol_alph_counter.next())) splitting.append(''.join(('\\grtabpgbreak{', cell.head, '}{', cell_width, '}{', add_vdots, '}{+2ex}%\n'))) return ''.join((' & '.join(splitting), '%\n', row.tail)) def mk_complete_row(self, row): complete =[] for cell in row.cells: if cell.span == 0: continue elif cell.span == 1: cell_width = ''.join(('\\grtempwidth', cell.colchar)) else: cell_width = ''.join(('\\grspanwidth', self.multcol_alph_counter.next())) complete.append(''.join(('\\grcolpart{%\n ', cell.head, '}{%\n', cell_width, '}{%\n ', cell.content, '%\n}%\n'))) return ''.join((' & '.join(complete), '%\n', row.tail, row.addit)) # --------------------------------------------------------------------- # end of special table treatment # --------------------------------------------------------------------- def page_break(self): "Forces a page break, creating a new page" self.emit('\\newpage%\n') def open(self, filename): """Opens the specified file, making sure that it has the extension of .tex""" self._backend = LateXBackend(filename) self._backend.open() # Font size control seems to be limited. For now, ignore # any style constraints, and use 12pt has the default options = "12pt" if self.paper.get_orientation() == PAPER_LANDSCAPE: options = options + ",landscape" # Paper selections are somewhat limited on a stock installation. # If the user picks something not listed here, we'll just accept # the default of the user's LaTeX installation (usually letter). paper_name = self.paper.get_size().get_name().lower() if paper_name in ["a4", "a5", "legal", "letter"]: options += ',' + paper_name + 'paper' # Use the article template, T1 font encodings, and specify # that we should use Latin1 and unicode character encodings. self.emit(_LATEX_TEMPLATE_1 % options) self.emit(_LATEX_TEMPLATE) self.in_list = False self.in_table = False self.head_line = False #Establish some local styles for the report self.latexstyle = {} self.latex_font = {} style_sheet = self.get_style_sheet() for style_name in style_sheet.get_paragraph_style_names(): style = style_sheet.get_paragraph_style(style_name) font = style.get_font() size = font.get_size() self.latex_font[style_name] = TexFont() thisstyle = self.latex_font[style_name] thisstyle.font_beg = "" thisstyle.font_end = "" # Is there special alignment? 
(default is left) align = style.get_alignment_text() if align == "center": thisstyle.font_beg += "{\\centering" thisstyle.font_end = ''.join(("\n\n}", thisstyle.font_end)) elif align == "right": thisstyle.font_beg += "\\hfill" # Establish font face and shape if font.get_type_face() == FONT_SANS_SERIF: thisstyle.font_beg += "\\sffamily" thisstyle.font_end = "\\rmfamily" + thisstyle.font_end if font.get_bold(): thisstyle.font_beg += "\\bfseries" thisstyle.font_end = "\\mdseries" + thisstyle.font_end if font.get_italic() or font.get_underline(): thisstyle.font_beg += "\\itshape" thisstyle.font_end = "\\upshape" + thisstyle.font_end # Now determine font size fontsize = map_font_size(size) if fontsize: thisstyle.font_beg += "\\" + fontsize thisstyle.font_end += "\\normalsize" thisstyle.font_beg += " " thisstyle.font_end += " " left = style.get_left_margin() first = style.get_first_indent() + left thisstyle.leftIndent = left thisstyle.firstLineIndent = first self.latexstyle[style_name] = thisstyle def close(self): """Clean up and close the document""" if self.in_list: self.emit('\\end{list}\n') self.emit('\\end{document}\n') self._backend.close() def end_page(self): """Issue a new page command""" self.emit('\\newpage') def start_paragraph(self, style_name, leader=None): """Paragraphs handling - A Gramps paragraph is any single body of text from a single word to several sentences. We assume a linebreak at the end of each paragraph.""" style_sheet = self.get_style_sheet() style = style_sheet.get_paragraph_style(style_name) ltxstyle = self.latexstyle[style_name] self.level = style.get_header_level() self.fbeg = ltxstyle.font_beg self.fend = ltxstyle.font_end self.indent = ltxstyle.leftIndent self.FLindent = ltxstyle.firstLineIndent if self.indent == 0: self.indent = self.FLindent # For additional vertical space beneath title line(s) # i.e. when the first centering ended: if self.in_title and ltxstyle.font_beg.find('centering') == -1: self.in_title = False self._backend.write('\\vspace{5ex}%\n') if self.in_table: # paragraph in table indicates: cols of rows self.in_multrow_cell = True else: if leader: self._backend.write(''.join(('\\grprepleader{', leader, '}%\n'))) else: self._backend.write('\\grprepnoleader%\n') # ------------------------------------------------------------------- # Gramps presumes 'cm' as units; here '\\grbaseindent' is used # as equivalent, set in '_LATEX_TEMPLATE' above to '3em'; # there another value might be choosen. 
# ------------------------------------------------------------------- if self.indent is not None: self._backend.write(''.join(('\\grminpghead{', repr(self.indent), '}{', repr(self.pict_width), '}%\n'))) self.fix_indent = True if leader is not None and not self.in_list: self.in_list = True self._backend.write(''.join(('\\grlisthead{', leader, '}%\n'))) if leader is None: self.emit('\n') self.emit('%s ' % self.fbeg) def end_paragraph(self): """End the current paragraph""" newline = '%\n\n' if self.in_list: self.in_list = False self.emit('\n\\grlisttail%\n') newline = '' elif self.in_table: newline = SEPARATION_PAT self.emit('%s%s' % (self.fend, newline)) if self.fix_indent: self.emit('\\grminpgtail%\n\n') self.fix_indent = False if self.pict_width: self.pict_width = 0 self.pict_height = 0 def start_bold(self): """Bold face""" self.emit('\\textbf{') def end_bold(self): """End bold face""" self.emit('}') def start_superscript(self): self.emit('\\textsuperscript{') def end_superscript(self): self.emit('}') def start_table(self, name,style_name): """Begin new table""" self.in_table = True self.currow = 0 # We need to know a priori how many columns are in this table styles = self.get_style_sheet() self.tblstyle = styles.get_table_style(style_name) self.numcols = self.tblstyle.get_columns() tblfmt = '*{%d}{l}' % self.numcols self.emit('\\begin{longtable}[l]{%s}\n' % (tblfmt), TAB_BEG) def end_table(self): """Close the table environment""" self.emit('%\n\\end{longtable}%\n', TAB_END) self.in_table = False def start_row(self): """Begin a new row""" self.emit('', ROW_BEG) # doline/skipfirst are flags for adding hor. rules self.doline = False self.skipfirst = False self.curcol = 0 self.currow = self.currow + 1 def end_row(self): """End the row (new line)""" self.emit('\\\\ ') if self.doline: if self.skipfirst: self.emit(''.join((('\\cline{2-%d}' % self.numcols), '%\n')), ROW_END) else: self.emit('\\hline %\n', ROW_END) else: self.emit('%\n', ROW_END) self.emit('%\n') def start_cell(self, style_name, span=1): """Add an entry to the table. We always place our data inside braces for safety of formatting.""" self.colspan = span self.curcol = self.curcol + self.colspan styles = self.get_style_sheet() self.cstyle = styles.get_cell_style(style_name) # ------------------------------------------------------------------ # begin special modification for boolean values # values imported here are used for test '==1' and '!=0'. To get # local boolean values the tests are now transfered to the import lines # ------------------------------------------------------------------ self.lborder = 1 == self.cstyle.get_left_border() self.rborder = 1 == self.cstyle.get_right_border() self.bborder = 1 == self.cstyle.get_bottom_border() self.tborder = 0 != self.cstyle.get_top_border() # self.llist not needed any longer. # now column widths are arranged in self.calc_latex_widths() # serving for fitting of cell contents at any column position. 
# self.llist = 1 == self.cstyle.get_longlist() cellfmt = "l" # Account for vertical rules if self.lborder: cellfmt = '|' + cellfmt if self.rborder: cellfmt = cellfmt + '|' # and Horizontal rules if self.bborder: self.doline = True elif self.curcol == 1: self.skipfirst = True if self.tborder: self.head_line = True # ------------------------------------------------------------------ # end special modification for boolean values # ------------------------------------------------------------------ self.emit('\\multicolumn{%d}{%s}' % (span, cellfmt), CELL_BEG, span) def end_cell(self): """Prepares for next cell""" self.emit('', CELL_END) def add_media_object(self, infile, pos, x, y, alt='', style_name=None, crop=None): """Add photo to report""" outfile = os.path.splitext(infile)[0] pictname = latexescape(os.path.split(outfile)[1]) outfile = ''.join((outfile, '.jpg')) if infile != outfile: try: curr_img = Image.open(infile) curr_img.save(outfile) px, py = curr_img.size if py > px: y = y*py/px except IOError: self.emit(''.join(('%\n *** Error: cannot convert ', infile, '\n *** to ', outfile, '%\n'))) if self.in_table: self.pict_in_table = True self.emit(''.join(('\\grmkpicture{', outfile, '}{', repr(x), '}{', repr(y), '}{', pictname, '}%\n'))) self.pict_width = x self.pict_height = y def write_text(self,text,mark=None,links=False): """Write the text to the file""" if text == '\n': text = '' text = latexescape(text) if links == True: text = re.sub(URL_PATTERN, _CLICKABLE, text) #hard coded replace of the underline used for missing names/data text = text.replace('\\_'*13, '\\underline{\hspace{3\\grbaseindent}}') self.emit(text + ' ') def write_styled_note(self, styledtext, format, style_name, contains_html=False, links=False): """ Convenience function to write a styledtext to the latex doc. styledtext : assumed a StyledText object to write format : = 0 : Flowed, = 1 : Preformatted style_name : name of the style to use for default presentation contains_html: bool, the backend should not check if html is present. If contains_html=True, then the textdoc is free to handle that in some way. Eg, a textdoc could remove all tags, or could make sure a link is clickable. self ignores notes that contain html links: bool, make URLs clickable if True """ if contains_html: return text = str(styledtext) s_tags = styledtext.get_tags() if format: #preformatted, use different escape function self._backend.setescape(True) markuptext = self._backend.add_markup_from_styled(text, s_tags) if links == True: markuptext = re.sub(URL_PATTERN, _CLICKABLE, markuptext) markuptext = self._backend.add_markup_from_styled(text, s_tags) #there is a problem if we write out a note in a table. # .................. # now solved by postprocessing in self.calc_latex_widths() # by explicitely setting suitable width for all columns. # if format: self.start_paragraph(style_name) self.emit(markuptext) self.end_paragraph() #preformatted finished, go back to normal escape function self._backend.setescape(False) else: for line in markuptext.split('%\n%\n '): self.start_paragraph(style_name) for realline in line.split('\n'): self.emit(realline) self.emit("~\\newline \n") self.end_paragraph()
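# ----------------------------------------------------------------------
# Illustrative sketch: a compact, self-contained demonstration of the
# small helpers defined earlier in this module -- get_charform(),
# get_numform() and str_incr() -- and of the zip(*...) transpose idea
# used by repack_row().  The sample cell contents are made-up values and
# the function is not called anywhere; it only serves as documentation.
# ----------------------------------------------------------------------
def _illustrate_table_helpers():
    """Self-contained demo of the column helpers; never invoked."""
    # column index <-> letter used to build LaTeX length names such as
    # \grtempwidtha, \grtempwidthb, ...
    assert get_charform(0) == 'a' and get_charform(25) == 'z'
    assert get_numform('c') == 2
    # string counter used to label \grspanwidth<id> for multicolumn cells
    counter = str_incr(MULTCOL_COUNT_BASE)
    assert counter.next() == 'aaa' and counter.next() == 'aab'
    # repack_row() turns "columns of stacked cell parts" into rows of
    # cells by transposing with zip(*...)
    cols = [['given name', 'surname'], ['birth', 'death']]
    assert zip(*cols) == [('given name', 'birth'), ('surname', 'death')]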
arunkgupta/gramps
gramps/plugins/docgen/latexdoc.py
Python
gpl-2.0
47,677
[ "Brian" ]
e7170347285128cfb82e7abf8ace8728bc38fb73e717d2a03fb65d966589950b
#!/usr/bin/python # (C) 2013, Markus Wildi, markus.wildi@bluewin.ch # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Or visit http://www.gnu.org/licenses/gpl.html. # """ """ __author__ = 'markus.wildi@bluewin.ch' import sys if 'matplotlib' not in sys.modules: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import numpy as np from scipy import optimize class TemperatureFocPosModel(object): """ Fit minimum focuser positions as function of temperature :var showPlot: if True show plot :var date: date when fit has been carried out :var comment: optional comment :var plotFn: file name where the plot is stored as PNG :var resultFitFwhm: :py:mod:`rts2saf.data.ResultFwhm` :var logger: :py:mod:`rts2saf.log` """ def __init__(self, showPlot=False, date=None, comment=None, plotFn=None, resultFitFwhm=None, logger=None): self.showPlot=showPlot self.date=date self.logger=logger self.comment=comment self.plotFn=plotFn self.resultFitFwhm=resultFitFwhm self.fitfunc = lambda p, x: p[0] + p[1] * x self.errfunc = lambda p, x, y, res, err: (y - self.fitfunc(p, x)) self.temp=list() self.tempErr=list() self.minFitPos=list() self.minFitPosErr=list() self.fig = None self.ax1 = None self.fig = plt.figure() self.ax1 = self.fig.add_subplot(111) def fitData(self): """Fit function using optimize.leastsq(). 
:return par, flag: fit parameters, non zero if successful """ for rFF in self.resultFitFwhm: try: self.temp.append(float(rFF.ambientTemp)) except Exception, e: self.temp.append(rFF.ambientTemp) if self.debug: self.logger.debug('temperaturemodel: new list created, error: {}'.format(e)) self.tempErr.append(0.01) try: self.minFitPos.append(float(rFF.extrFitPos)) except Exception, e: self.logger.error('temperaturemodel: serious error: could not append type: {}, error: {}'.format(type(rFF.extrFitPos), e)) self.minFitPosErr.append(2.5) par= np.array([1., 1.]) self.flag=None try: self.par, self.flag = optimize.leastsq(self.errfunc, par, args=(np.asarray(self.temp), np.asarray(self.minFitPos),np.asarray(self.tempErr),np.asarray(self.minFitPosErr))) except Exception, e: self.logger.error('temperaturemodel: fit failed:\n{0}'.format(e)) return None, None return self.par, self.flag def plotData(self): """Display fit using matplotlib :return: :py:mod:`rts2saf.data.DataFit`.plotFn """ try: x_temp = np.linspace(min(self.temp), max(self.temp)) except Exception, e: self.logger.error('temperaturemodel: numpy error:\n{0}'.format(e)) return e self.ax1.plot(self.temp,self.minFitPos, 'ro', color='blue') self.ax1.errorbar(self.temp,self.minFitPos, xerr=self.tempErr, yerr=self.minFitPosErr, ecolor='blue', fmt=None) if self.flag: self.ax1.plot(x_temp, self.fitfunc(self.par, x_temp), 'r-', color='red') if self.comment: self.ax1.set_title('rts2saf {0}, temperature model, {1}'.format(self.date, self.comment), fontsize=12) else: self.ax1.set_title('rts2saf {0}, temperature model'.format(self.date), fontsize=12) self.ax1.set_xlabel('ambient temperature [degC]') self.ax1.set_ylabel('FOC_POS(min. FWHM) [ticks]') self.ax1.grid(True) if self.showPlot: plt.show() else: self.logger.warn('temperaturemodel: NO $DISPLAY no plot') # no return here try: self.fig.savefig(self.plotFn) return self.plotFn except Exception, e: self.logger.error('temperaturemodel: can not save plot to: {0}, matplotlib msg: {1}'.format(self.plotFn, e)) return e
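# ----------------------------------------------------------------------
# Illustrative sketch: the same least-squares pattern used by fitData()
# above, reduced to a standalone helper.  The temperatures and focuser
# positions below are made-up values, and the helper is not called
# anywhere; it only documents the optimize.leastsq() call.
# ----------------------------------------------------------------------
def _example_linear_focus_fit():
    """Fit focPos = p[0] + p[1] * temperature with scipy.optimize.leastsq."""
    temps = np.array([2.0, 5.0, 8.0, 11.0, 14.0])
    focpos = np.array([3120.0, 3090.0, 3065.0, 3038.0, 3010.0])
    fitfunc = lambda p, x: p[0] + p[1] * x
    errfunc = lambda p, x, y: y - fitfunc(p, x)
    # leastsq returns the fitted parameters and an integer flag that is
    # 1-4 on success
    par, flag = optimize.leastsq(errfunc, np.array([1.0, 1.0]),
                                 args=(temps, focpos))
    return par, flag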
jstrobl/rts2
scripts/rts2saf/rts2saf/temperaturemodel.py
Python
lgpl-3.0
4,927
[ "VisIt" ]
eaf77bc663c16f5ece233a468351e07bfad1526036bec8402dca07be9cfebc6f
# Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. 
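For example, in a tarball produced by `git archive` from a tagged commit,
the expanded keywords inside `_version.py` might look something like this
(illustrative values only):

    git_refnames = " (HEAD -> master, tag: 1.2)"
    git_full = "1076c978a8d3cfc70f408fe5974aa6c092c949ac"

whereas a plain VCS checkout still carries the literal `$Format:...$`
placeholders, in which case Versioneer falls back to asking the VCS tool
directly.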
To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". 
To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. 
Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when then write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ from __future__ import print_function import errno import json import os import re import subprocess import sys try: import configparser except ImportError: import ConfigParser as configparser class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. 
me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. 
# setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s*" %% tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. 
cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". 
tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s*" % tag_prefix], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(): """Get the custom setuptools/distutils subclasses used by Versioneer.""" if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/warner/python-versioneer/issues/52 cmds = {} # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? 
# we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: from py2exe.build_exe import py2exe as _py2exe # py2 class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer configuration. 
You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): """Main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. 
do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
spencerkclark/aospy
versioneer.py
Python
apache-2.0
68,612
[ "Brian" ]
9bb36a97747ab81f217208e39a61ebb2c806a0b316572e6ef513d57d4409898f
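The render_pep440 helper defined in the versioneer.py record above builds the PEP 440 "local version identifier" (TAG[+DISTANCE.gHEX[.dirty]]) from the pieces that 'git describe' yields. A small worked example of that rule follows; the tag, distance and hash are made-up values for illustration only, and the few lines of logic simply restate what render_pep440 does rather than importing it:

pieces = {
    "closest-tag": "1.2.0",    # nearest reachable tag, prefix already stripped
    "distance": 3,             # commits since that tag
    "short": "abc1234",        # abbreviated HEAD revision
    "dirty": True,             # working tree has uncommitted changes
}

# TAG[+DISTANCE.gHEX[.dirty]], as in render_pep440
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
    rendered += "." if "+" in pieces["closest-tag"] else "+"
    rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        rendered += ".dirty"

assert rendered == "1.2.0+3.gabc1234.dirty"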
import tensorflow as tf
import numpy as np
import autoencoder.Utils


class VariationalAutoencoder(object):

    def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
        self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])

        # sample from gaussian distribution
        eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
        self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])

        # cost
        reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
                                           - tf.square(self.z_mean)
                                           - tf.exp(self.z_log_sigma_sq), 1)
        self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['log_sigma_w1'] = tf.Variable(autoencoder.Utils.xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict = {self.x: X})

    def transform(self, X):
        return self.sess.run(self.z_mean, feed_dict={self.x: X})

    def generate(self, hidden = None):
        if hidden is None:
            # Draw a latent sample of the right shape; passing the tf.Variable
            # self.weights["b1"] as `size` (as the original did) is not a valid
            # numpy size argument.
            hidden = np.random.normal(size=(1, self.n_hidden))
        # Feed the sample into z itself so the decoder can run without an
        # input batch for x (reconstruction depends only on z, w2 and b2).
        return self.sess.run(self.reconstruction, feed_dict={self.z: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])
beomyeol/models
autoencoder/autoencoder_models/VariationalAutoencoder.py
Python
apache-2.0
2,995
[ "Gaussian" ]
9d27e335d25721f1f99dd09ea691b2d3fbd7f914071e308e97103576f28437b8
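A minimal training sketch for the VariationalAutoencoder class in the record above. The import path is an assumption inferred from the record's path field, the data is a random stand-in for flattened images, and the snippet presupposes the TensorFlow 1.x API and the autoencoder.Utils.xavier_init helper that the class itself relies on:

import numpy as np
from autoencoder.autoencoder_models.VariationalAutoencoder import VariationalAutoencoder  # assumed package layout

X = np.random.rand(512, 784).astype(np.float32)   # stand-in for flattened 28x28 images

vae = VariationalAutoencoder(n_input=784, n_hidden=20)
for epoch in range(5):
    for start in range(0, len(X), 128):
        cost = vae.partial_fit(X[start:start + 128])
    print("epoch %d, last batch cost %.1f" % (epoch, cost))

codes = vae.transform(X[:10])      # latent means, shape (10, 20)
recon = vae.reconstruct(X[:10])    # reconstructions, shape (10, 784)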
'''
Gaussian smoothing with Python and OpenCV.
'''
import cv2
import numpy as np
import math
import matplotlib.pyplot as plt


def gaussian_smooth2(img, sigma):
    '''
    Do gaussian smoothing with sigma.
    Returns the smoothed image.
    '''
    # determine the length of the filter
    filter_length = math.ceil(sigma * 5)
    # make the length odd (floor division keeps this correct under Python 3,
    # where / returns a float and could otherwise produce an even kernel size,
    # which cv2.GaussianBlur rejects)
    filter_length = int(2 * (int(filter_length) // 2) + 1)

    # Tip: smoothing=blurring, a filter=a kernel
    result = cv2.GaussianBlur(img, (filter_length, filter_length), sigma)

    return result


# this part of the code is only executed if the file is run stand-alone
if __name__ == '__main__':
    # read an image
    img = cv2.imread('image.jpg')

    # smooth the image
    smoothed_img = gaussian_smooth2(img, 2)

    # Showing the results
    plt.subplot(121)
    plt.imshow(img, cmap='gray')
    plt.title('Original Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122)
    plt.imshow(smoothed_img, cmap='gray')
    plt.title('Smoothed Image'), plt.xticks([]), plt.yticks([])
    plt.show()
Vkomini/KU-Leuven
Computer Vision/Assignments/Assign1/smoothing2.py
Python
apache-2.0
1,011
[ "Gaussian" ]
0ce039cbb1a2fd5dd03ad79d0a43d9db484b5de81e1a761c1c15d03e40bb3130
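The gaussian_smooth2 helper above picks a kernel length of roughly five sigma, rounded to an odd integer, and delegates the filtering to cv2.GaussianBlur. As an illustration (not part of the original assignment), the same blur can be written with an explicit 1-D kernel from cv2.getGaussianKernel, which is handy when the kernel weights themselves need to be inspected; the synthetic image below is a stand-in for the image.jpg the script expects:

import math
import cv2
import numpy as np

sigma = 2.0
ksize = int(2 * (int(math.ceil(sigma * 5)) // 2) + 1)   # odd length, 11 for sigma = 2

img = (np.random.rand(64, 64) * 255).astype(np.uint8)   # stand-in image

blurred = cv2.GaussianBlur(img, (ksize, ksize), sigma)

# Equivalent separable filtering with an explicit 1-D Gaussian kernel
kernel_1d = cv2.getGaussianKernel(ksize, sigma)          # column vector, sums to 1
blurred_explicit = cv2.sepFilter2D(img, -1, kernel_1d, kernel_1d)

diff = cv2.absdiff(blurred, blurred_explicit)
print("max abs difference:", diff.max())                 # expected 0, or 1 from rounding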
from __future__ import absolute_import from __future__ import print_function from .base import NodeTransformer from .. import nodes #==============================================================================# class RulesetChain(object): def __init__(self, selectors, statements): assert isinstance(selectors, list) assert all(isinstance(sel, nodes.Selector) for sel in selectors) assert isinstance(statements, list) self.selector_seqs = [[s] for s in selectors] self.statements = statements def prepend_selectors(self, selectors): assert isinstance(selectors, list) if not selectors: return elif len(selectors) == 1: for selector_seq in self.selector_seqs: selector_seq.insert(0, selectors[0]) else: new_selector_seqs = [] for selector_seq in self.selector_seqs: for selector in selectors: new_selector_seq = [selector] + selector_seq new_selector_seqs.append(new_selector_seq) self.selector_seqs = new_selector_seqs def _resolve_selector(self, ancestors, selector): assert isinstance(ancestors, list) assert isinstance(selector, nodes.Selector) if not ancestors: return selector.children[:] for i,node in enumerate(selector.children): if isinstance(node, nodes.SimpleSelectorSequence) and isinstance(node.head, nodes.CombineAncestorSelector): newseq = selector.children[:] if node.tail: assert isinstance(ancestors[-1], nodes.SimpleSelectorSequence) anc = nodes.SimpleSelectorSequence(ancestors[-1].head, ancestors[-1].tail[:]) # clone()? ancestors[-1] = anc ancestors[-1].tail.extend(node.tail) newseq[i:i+1] = ancestors return newseq else: ancestors.append(nodes.DescendantCombinator()) ancestors.extend(selector.children) return ancestors def resolve_selectors(self): selectors = [] for selector_seq in self.selector_seqs: ancestors = [] tail = selector_seq while tail: ancestors = self._resolve_selector(ancestors, tail[0]) tail = tail[1:] sel = nodes.Selector(ancestors) selectors.append(sel) return selectors #==============================================================================# class RulesetFlattener(NodeTransformer): def __init__(self, options=None): pass def __call__(self, node): return self.visit(node) def visit(self, node): # All visits of RuleSets should be done by calling self.visit_RuleSet # directly from their parent nodes. 
assert node is not None assert not isinstance(node, nodes.RuleSet) return super(RulesetFlattener, self).visit(node) def visit_Stylesheet(self, node): i = 0 while i < len(node.statements): stmt = node.statements[i] if isinstance(stmt, nodes.RuleSet): # get chains from ruleset chains = self.visit_RuleSet(stmt) newrulesets = [] for chain in chains: # resolve chain selectors selectors = chain.resolve_selectors() statements = chain.statements # create new ruleset from resolved selectors and statements ruleset = nodes.RuleSet(selectors, statements) newrulesets.append(ruleset) # replace stmt with new rulesets node.statements[i:i+1] = newrulesets i += len(newrulesets) else: newstmt = self.visit(stmt) if newstmt: node.statements[i:i+1] = [newstmt] i += 1 else: node.statements[i:i+1] = [] return node def visit_RuleSet(self, node): child_rulesets = [] child_statements = [] # non-ruleset statements for stmt in node.statements: if isinstance(stmt, nodes.RuleSet): child_rulesets.append(stmt) elif isinstance(stmt, nodes.VarDef): # TODO: use pkg-specific exception raise RuntimeError('Cannot flatten rulesets containing variable definitions.') else: child_statements.append(stmt) allchains = [] for stmt in child_rulesets: chains = self.visit_RuleSet(stmt) allchains.extend(chains) for chain in allchains: chain.prepend_selectors(node.selectors) mychain = RulesetChain(node.selectors, child_statements) allchains.insert(0, mychain) assert all(isinstance(ch, RulesetChain) for ch in allchains) return allchains #==============================================================================#
colossalbit/cssypy
cssypy/visitors/flatteners.py
Python
bsd-3-clause
5,209
[ "VisIt" ]
e6d98d533d3e28ad47e8d5caa38b49aea0926875fd8902711c8b8e2a785d0161
#!/usr/bin/env python # # Author: Qiming Sun <osirpt.sun@gmail.com> # ''' A simple example to run CASSCF calculation. ''' import pyscf mol = pyscf.M( atom = 'O 0 0 0; O 0 0 1.2', basis = 'ccpvdz', spin = 2) myhf = mol.RHF().run() # 6 orbitals, 8 electrons mycas = myhf.CASSCF(6, 8).run() # # Note this mycas object can also be created using the APIs of mcscf module: # # from pyscf import mcscf # mycas = mcscf.CASSCF(myhf, 6, 8).run() # Natural occupancy in CAS space, Mulliken population etc. # See also 00-simple_casci.py for the instruction of the output of analyze() # method mycas.verbose = 4 mycas.analyze()
gkc1000/pyscf
examples/mcscf/00-simple_casscf.py
Python
apache-2.0
633
[ "PySCF" ]
c5df4e10767c70909c3128828d7f27ffa787815d654f9df3be7f255044de24c2
# coding: utf-8 """ Vericred API Vericred's API allows you to search for Health Plans that a specific doctor accepts. ## Getting Started Visit our [Developer Portal](https://developers.vericred.com) to create an account. Once you have created an account, you can create one Application for Production and another for our Sandbox (select the appropriate Plan when you create the Application). ## SDKs Our API follows standard REST conventions, so you can use any HTTP client to integrate with us. You will likely find it easier to use one of our [autogenerated SDKs](https://github.com/vericred/?query=vericred-), which we make available for several common programming languages. ## Authentication To authenticate, pass the API Key you created in the Developer Portal as a `Vericred-Api-Key` header. `curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"` ## Versioning Vericred's API default to the latest version. However, if you need a specific version, you can request it with an `Accept-Version` header. The current version is `v3`. Previous versions are `v1` and `v2`. `curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"` ## Pagination Endpoints that accept `page` and `per_page` parameters are paginated. They expose four additional fields that contain data about your position in the response, namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988). For example, to display 5 results per page and view the second page of a `GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`. ## Sideloading When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s we sideload the associated data. In this example, we would provide an Array of `State`s and a `state_id` for each provider. This is done primarily to reduce the payload size since many of the `Provider`s will share a `State` ``` { providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }], states: [{ id: 1, code: 'NY' }] } ``` If you need the second level of the object graph, you can just match the corresponding id. ## Selecting specific data All endpoints allow you to specify which fields you would like to return. This allows you to limit the response to contain only the data you need. For example, let's take a request that returns the following JSON by default ``` { provider: { id: 1, name: 'John', phone: '1234567890', field_we_dont_care_about: 'value_we_dont_care_about' }, states: [{ id: 1, name: 'New York', code: 'NY', field_we_dont_care_about: 'value_we_dont_care_about' }] } ``` To limit our results to only return the fields we care about, we specify the `select` query string parameter for the corresponding fields in the JSON document. In this case, we want to select `name` and `phone` from the `provider` key, so we would add the parameters `select=provider.name,provider.phone`. We also want the `name` and `code` from the `states` key, so we would add the parameters `select=states.name,states.code`. The id field of each document is always returned whether or not it is requested. 
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code` The response would be ``` { provider: { id: 1, name: 'John', phone: '1234567890' }, states: [{ id: 1, name: 'New York', code: 'NY' }] } ``` ## Benefits summary format Benefit cost-share strings are formatted to capture: * Network tiers * Compound or conditional cost-share * Limits on the cost-share * Benefit-specific maximum out-of-pocket costs **Example #1** As an example, we would represent [this Summary of Benefits &amp; Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as: * **Hospital stay facility fees**: - Network Provider: `$400 copay/admit plus 20% coinsurance` - Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance` - Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible` * **Rehabilitation services:** - Network Provider: `20% coinsurance` - Out-of-Network Provider: `50% coinsurance` - Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.` - Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period` **Example #2** In [this other Summary of Benefits &amp; Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies. * **Specialty drugs:** - Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply` - Out-of-Network Provider `Not covered` - Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%` **BNF** Here's a description of the benefits summary string, represented as a context-free grammar: ``` root ::= coverage coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)? tiered_coverage ::= tier (space slash space tier)* tier ::= tier_name colon space (tier_coverage | not_applicable) tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation? simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)? coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers) waived_if_admitted ::= ("copay" space)? "waived if admitted" simple_limitation ::= pre_coverage_limitation space "copay applies" tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network" limit_condition ::= "limit" | "condition" tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)? coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?) pre_coverage_limitation ::= first space digits space time_unit plural? post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural? coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)? allowance ::= upto_allowance | after_allowance upto_allowance ::= "up to" space (currency space)? 
"allowance" after_allowance ::= "after" space (currency space)? "allowance" see_carrier_documentation ::= "see carrier documentation for more information" shared_across_tiers ::= "shared across all tiers" unknown ::= "unknown" unlimited ::= /[uU]nlimited/ included ::= /[iI]ncluded in [mM]edical/ time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/) treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/ comma ::= "," colon ::= ":" semicolon ::= ";" pipe ::= "|" slash ::= "/" plural ::= "(s)" | "s" then ::= "then" | ("," space) | space or ::= "or" and ::= "and" not_applicable ::= "Not Applicable" | "N/A" | "NA" first ::= "first" currency ::= "$" number percentage ::= number "%" number ::= float | integer float ::= digits "." digits integer ::= /[0-9]/+ (comma_int | under_int)* comma_int ::= ("," /[0-9]/*3) !"_" under_int ::= ("_" /[0-9]/*3) !"," digits ::= /[0-9]/+ ("_" /[0-9]/+)* space ::= /[ \t]/+ ``` OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pformat from six import iteritems import re class Provider(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, city=None, email=None, gender=None, first_name=None, id=None, last_name=None, latitude=None, longitude=None, middle_name=None, network_ids=None, organization_name=None, personal_phone=None, phone=None, presentation_name=None, specialty=None, state=None, state_id=None, street_line_1=None, street_line_2=None, suffix=None, title=None, type=None, zip_code=None, npis=None): """ Provider - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. 
""" self.swagger_types = { 'city': 'str', 'email': 'str', 'gender': 'str', 'first_name': 'str', 'id': 'int', 'last_name': 'str', 'latitude': 'float', 'longitude': 'float', 'middle_name': 'str', 'network_ids': 'list[int]', 'organization_name': 'str', 'personal_phone': 'str', 'phone': 'str', 'presentation_name': 'str', 'specialty': 'str', 'state': 'str', 'state_id': 'int', 'street_line_1': 'str', 'street_line_2': 'str', 'suffix': 'str', 'title': 'str', 'type': 'str', 'zip_code': 'str', 'npis': 'list[int]' } self.attribute_map = { 'city': 'city', 'email': 'email', 'gender': 'gender', 'first_name': 'first_name', 'id': 'id', 'last_name': 'last_name', 'latitude': 'latitude', 'longitude': 'longitude', 'middle_name': 'middle_name', 'network_ids': 'network_ids', 'organization_name': 'organization_name', 'personal_phone': 'personal_phone', 'phone': 'phone', 'presentation_name': 'presentation_name', 'specialty': 'specialty', 'state': 'state', 'state_id': 'state_id', 'street_line_1': 'street_line_1', 'street_line_2': 'street_line_2', 'suffix': 'suffix', 'title': 'title', 'type': 'type', 'zip_code': 'zip_code', 'npis': 'npis' } self._city = city self._email = email self._gender = gender self._first_name = first_name self._id = id self._last_name = last_name self._latitude = latitude self._longitude = longitude self._middle_name = middle_name self._network_ids = network_ids self._organization_name = organization_name self._personal_phone = personal_phone self._phone = phone self._presentation_name = presentation_name self._specialty = specialty self._state = state self._state_id = state_id self._street_line_1 = street_line_1 self._street_line_2 = street_line_2 self._suffix = suffix self._title = title self._type = type self._zip_code = zip_code self._npis = npis @property def city(self): """ Gets the city of this Provider. City name (e.g. Springfield). :return: The city of this Provider. :rtype: str """ return self._city @city.setter def city(self, city): """ Sets the city of this Provider. City name (e.g. Springfield). :param city: The city of this Provider. :type: str """ self._city = city @property def email(self): """ Gets the email of this Provider. Primary email address to contact the provider. :return: The email of this Provider. :rtype: str """ return self._email @email.setter def email(self, email): """ Sets the email of this Provider. Primary email address to contact the provider. :param email: The email of this Provider. :type: str """ self._email = email @property def gender(self): """ Gets the gender of this Provider. Provider's gender (M or F) :return: The gender of this Provider. :rtype: str """ return self._gender @gender.setter def gender(self, gender): """ Sets the gender of this Provider. Provider's gender (M or F) :param gender: The gender of this Provider. :type: str """ self._gender = gender @property def first_name(self): """ Gets the first_name of this Provider. Given name for the provider. :return: The first_name of this Provider. :rtype: str """ return self._first_name @first_name.setter def first_name(self, first_name): """ Sets the first_name of this Provider. Given name for the provider. :param first_name: The first_name of this Provider. :type: str """ self._first_name = first_name @property def id(self): """ Gets the id of this Provider. National Provider Index (NPI) number :return: The id of this Provider. :rtype: int """ return self._id @id.setter def id(self, id): """ Sets the id of this Provider. National Provider Index (NPI) number :param id: The id of this Provider. 
:type: int """ self._id = id @property def last_name(self): """ Gets the last_name of this Provider. Family name for the provider. :return: The last_name of this Provider. :rtype: str """ return self._last_name @last_name.setter def last_name(self, last_name): """ Sets the last_name of this Provider. Family name for the provider. :param last_name: The last_name of this Provider. :type: str """ self._last_name = last_name @property def latitude(self): """ Gets the latitude of this Provider. Latitude of provider :return: The latitude of this Provider. :rtype: float """ return self._latitude @latitude.setter def latitude(self, latitude): """ Sets the latitude of this Provider. Latitude of provider :param latitude: The latitude of this Provider. :type: float """ self._latitude = latitude @property def longitude(self): """ Gets the longitude of this Provider. Longitude of provider :return: The longitude of this Provider. :rtype: float """ return self._longitude @longitude.setter def longitude(self, longitude): """ Sets the longitude of this Provider. Longitude of provider :param longitude: The longitude of this Provider. :type: float """ self._longitude = longitude @property def middle_name(self): """ Gets the middle_name of this Provider. Middle name for the provider. :return: The middle_name of this Provider. :rtype: str """ return self._middle_name @middle_name.setter def middle_name(self, middle_name): """ Sets the middle_name of this Provider. Middle name for the provider. :param middle_name: The middle_name of this Provider. :type: str """ self._middle_name = middle_name @property def network_ids(self): """ Gets the network_ids of this Provider. Array of network ids :return: The network_ids of this Provider. :rtype: list[int] """ return self._network_ids @network_ids.setter def network_ids(self, network_ids): """ Sets the network_ids of this Provider. Array of network ids :param network_ids: The network_ids of this Provider. :type: list[int] """ self._network_ids = network_ids @property def organization_name(self): """ Gets the organization_name of this Provider. name for the providers of type: organization. :return: The organization_name of this Provider. :rtype: str """ return self._organization_name @organization_name.setter def organization_name(self, organization_name): """ Sets the organization_name of this Provider. name for the providers of type: organization. :param organization_name: The organization_name of this Provider. :type: str """ self._organization_name = organization_name @property def personal_phone(self): """ Gets the personal_phone of this Provider. Personal contact phone for the provider. :return: The personal_phone of this Provider. :rtype: str """ return self._personal_phone @personal_phone.setter def personal_phone(self, personal_phone): """ Sets the personal_phone of this Provider. Personal contact phone for the provider. :param personal_phone: The personal_phone of this Provider. :type: str """ self._personal_phone = personal_phone @property def phone(self): """ Gets the phone of this Provider. Office phone for the provider :return: The phone of this Provider. :rtype: str """ return self._phone @phone.setter def phone(self, phone): """ Sets the phone of this Provider. Office phone for the provider :param phone: The phone of this Provider. :type: str """ self._phone = phone @property def presentation_name(self): """ Gets the presentation_name of this Provider. Preferred name for display (e.g. Dr. Francis White may prefer Dr. 
Frank White) :return: The presentation_name of this Provider. :rtype: str """ return self._presentation_name @presentation_name.setter def presentation_name(self, presentation_name): """ Sets the presentation_name of this Provider. Preferred name for display (e.g. Dr. Francis White may prefer Dr. Frank White) :param presentation_name: The presentation_name of this Provider. :type: str """ self._presentation_name = presentation_name @property def specialty(self): """ Gets the specialty of this Provider. Name of the primary Specialty :return: The specialty of this Provider. :rtype: str """ return self._specialty @specialty.setter def specialty(self, specialty): """ Sets the specialty of this Provider. Name of the primary Specialty :param specialty: The specialty of this Provider. :type: str """ self._specialty = specialty @property def state(self): """ Gets the state of this Provider. State code for the provider's address (e.g. NY). :return: The state of this Provider. :rtype: str """ return self._state @state.setter def state(self, state): """ Sets the state of this Provider. State code for the provider's address (e.g. NY). :param state: The state of this Provider. :type: str """ self._state = state @property def state_id(self): """ Gets the state_id of this Provider. Foreign key to States :return: The state_id of this Provider. :rtype: int """ return self._state_id @state_id.setter def state_id(self, state_id): """ Sets the state_id of this Provider. Foreign key to States :param state_id: The state_id of this Provider. :type: int """ self._state_id = state_id @property def street_line_1(self): """ Gets the street_line_1 of this Provider. First line of the provider's street address. :return: The street_line_1 of this Provider. :rtype: str """ return self._street_line_1 @street_line_1.setter def street_line_1(self, street_line_1): """ Sets the street_line_1 of this Provider. First line of the provider's street address. :param street_line_1: The street_line_1 of this Provider. :type: str """ self._street_line_1 = street_line_1 @property def street_line_2(self): """ Gets the street_line_2 of this Provider. Second line of the provider's street address. :return: The street_line_2 of this Provider. :rtype: str """ return self._street_line_2 @street_line_2.setter def street_line_2(self, street_line_2): """ Sets the street_line_2 of this Provider. Second line of the provider's street address. :param street_line_2: The street_line_2 of this Provider. :type: str """ self._street_line_2 = street_line_2 @property def suffix(self): """ Gets the suffix of this Provider. Suffix for the provider's name (e.g. Jr) :return: The suffix of this Provider. :rtype: str """ return self._suffix @suffix.setter def suffix(self, suffix): """ Sets the suffix of this Provider. Suffix for the provider's name (e.g. Jr) :param suffix: The suffix of this Provider. :type: str """ self._suffix = suffix @property def title(self): """ Gets the title of this Provider. Professional title for the provider (e.g. Dr). :return: The title of this Provider. :rtype: str """ return self._title @title.setter def title(self, title): """ Sets the title of this Provider. Professional title for the provider (e.g. Dr). :param title: The title of this Provider. :type: str """ self._title = title @property def type(self): """ Gets the type of this Provider. Type of NPI number (individual provider vs organization). :return: The type of this Provider. 
:rtype: str """ return self._type @type.setter def type(self, type): """ Sets the type of this Provider. Type of NPI number (individual provider vs organization). :param type: The type of this Provider. :type: str """ self._type = type @property def zip_code(self): """ Gets the zip_code of this Provider. Postal code for the provider's address (e.g. 11215) :return: The zip_code of this Provider. :rtype: str """ return self._zip_code @zip_code.setter def zip_code(self, zip_code): """ Sets the zip_code of this Provider. Postal code for the provider's address (e.g. 11215) :param zip_code: The zip_code of this Provider. :type: str """ self._zip_code = zip_code @property def npis(self): """ Gets the npis of this Provider. The National Provider Index (NPI) numbers associated with this provider. :return: The npis of this Provider. :rtype: list[int] """ return self._npis @npis.setter def npis(self, npis): """ Sets the npis of this Provider. The National Provider Index (NPI) numbers associated with this provider. :param npis: The npis of this Provider. :type: list[int] """ self._npis = npis def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
vericred/vericred-python
vericred_client/models/provider.py
Python
apache-2.0
27,378
[ "VisIt" ]
0ea16acc83e477083d2c91e3a6b375798a91ad666f8652f311e05ca04c782156
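The Provider model above follows the usual swagger-codegen layout: private attributes behind property getters/setters, a swagger_types map, and a generic to_dict that recursively serializes nested models and lists. A minimal sketch of that serialization pattern, using a hypothetical two-field model rather than the real vericred Provider (the generated client iterates with six.iteritems where this sketch uses plain dict iteration):

class TinyModel(object):
    # swagger_types maps attribute names to declared types, mirroring the role it plays in Provider
    swagger_types = {'id': 'int', 'npis': 'list[int]'}

    def __init__(self, id=None, npis=None):
        self._id = id
        self._npis = npis

    @property
    def id(self):
        return self._id

    @property
    def npis(self):
        return self._npis

    def to_dict(self):
        # same recursion as Provider.to_dict: nested models expose their own
        # to_dict, lists are converted element by element, scalars pass through
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value
        return result

print(TinyModel(id=12, npis=[1234567890]).to_dict())   # -> {'id': 12, 'npis': [1234567890]}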
from plugin import Plugin import discord import logging import urllib.request import os import uuid import styrobot import commands class Meme(Plugin): async def initialize(self, bot): self.tag = 'meme' self.shortTag = 'meme' self.defaultParser = commands.CommandRegistry.PARAM_PARSER_ALL self.defaultParserType = commands.ParamParserType.ALL @styrobot.plugincommand('10 Guy', name='tenguy') async def _tenguy_(self, server, channel, author, message): """ Generate a 10 Guy meme. `!meme tenguy <message>` **Example with top & bottom text:** `!meme tenguy top text:bottom text` **Example with top text:** `!meme tenguy text displayed on top` """ await self.displayMeme(channel, 'tenguy', message) @styrobot.plugincommand('Afraid to Ask Andy', name='afraid') async def _afraid_(self, server, channel, author, message): """ Generate an Afraid to Ask Andy meme. `!meme afraid <message>` **Example with top & bottom text:** `!meme afraid top text:bottom text` **Example with top text:** `!meme afraid text displayed on top` """ await self.displayMeme(channel, 'afraid', message) @styrobot.plugincommand('An Older Code Sir, But It Checks Out', name='older') async def _older_(self, server, channel, author, message): """ Generate An Older Code Sir, But It Checks Out meme. `!meme older <message>` **Example with top & bottom text:** `!meme older top text:bottom text` **Example with top text:** `!meme older text displayed on top` """ await self.displayMeme(channel, 'older', message) @styrobot.plugincommand('Ancient Aliens Guy', name='aag') async def _aag_(self, server, channel, author, message): """ Generate an Ancient Aliens Guy meme. `!meme aag <message>` **Example with top & bottom text:** `!meme aag top text:bottom text` **Example with top text:** `!meme aag text displayed on top` """ await self.displayMeme(channel, 'aag', message) @styrobot.plugincommand('At Least You Tried', name='tried') async def _tried_(self, server, channel, author, message): """ Generate an At Least You Tried meme. `!meme tried <message>` **Example with top & bottom text:** `!meme tried top text:bottom text` **Example with top text:** `!meme tried text displayed on top` """ await self.displayMeme(channel, 'tried', message) @styrobot.plugincommand('Baby Insanity Wolf', name='biw') async def _biw_(self, server, channel, author, message): """ Generate a Baby Insanity Wolf meme. `!meme biw <message>` **Example with top & bottom text:** `!meme biw top text:bottom text` **Example with top text:** `!meme biw text displayed on top` """ await self.displayMeme(channel, 'biw', message) @styrobot.plugincommand('Bad Luck Brian', name='blb') async def _blb_(self, server, channel, author, message): """ Generate a Bad Luck Brian meme. `!meme blb <message>` **Example with top & bottom text:** `!meme blb top text:bottom text` **Example with top text:** `!meme blb text displayed on top` """ await self.displayMeme(channel, 'blb', message) @styrobot.plugincommand('But That\'s None of My Business', name='kermit') async def _kermit_(self, server, channel, author, message): """ Generate a But That's None of My Business meme. `!meme kermit <message>` **Example with top & bottom text:** `!meme kermit top text:bottom text` **Example with top text:** `!meme kermit text displayed on top` """ await self.displayMeme(channel, 'kermit', message) @styrobot.plugincommand('Butthurt Dweller', name='bd') async def _bd_(self, server, channel, author, message): """ Generate a Butthurt Dweller meme. 
`!meme bd <message>` **Example with top & bottom text:** `!meme bd top text:bottom text` **Example with top text:** `!meme bd text displayed on top` """ await self.displayMeme(channel, 'bd', message) @styrobot.plugincommand('Captain Hindsight', name='ch') async def _ch_(self, server, channel, author, message): """ Generate a Captain Hindsight meme. `!meme ch <message>` **Example with top & bottom text:** `!meme ch top text:bottom text` **Example with top text:** `!meme ch text displayed on top` """ await self.displayMeme(channel, 'ch', message) @styrobot.plugincommand('Comic Book Guy', name='cbg') async def _cbg_(self, server, channel, author, message): """ Generate a Comic Book Guy meme. `!meme cbg <message>` **Example with top & bottom text:** `!meme cbg top text:bottom text` **Example with top text:** `!meme cbg text displayed on top` """ await self.displayMeme(channel, 'cbg', message) @styrobot.plugincommand('Condescending Wonka', name='wonka') async def _wonka_(self, server, channel, author, message): """ Generate a Condescending Wonka meme. `!meme wonka <message>` **Example with top & bottom text:** `!meme wonka top text:bottom text` **Example with top text:** `!meme wonka text displayed on top` """ await self.displayMeme(channel, 'wonka', message) @styrobot.plugincommand('Confession Bear', name='cb') async def _cb_(self, server, channel, author, message): """ Generate a Confession Bear meme. `!meme cb <message>` **Example with top & bottom text:** `!meme cb top text:bottom text` **Example with top text:** `!meme cb text displayed on top` """ await self.displayMeme(channel, 'cb', message) @styrobot.plugincommand('Conspiracy Keanu', name='keanu') async def _keanu_(self, server, channel, author, message): """ Generate a Conspiracy Keanu meme. `!meme keanu <message>` **Example with top & bottom text:** `!meme keanu top text:bottom text` **Example with top text:** `!meme keanu text displayed on top` """ await self.displayMeme(channel, 'keanu', message) @styrobot.plugincommand('Dating Site Murderer', name='dsm') async def _dsm_(self, server, channel, author, message): """ Generate a Dating Site Murderer meme. `!meme dsm <message>` **Example with top & bottom text:** `!meme dsm top text:bottom text` **Example with top text:** `!meme dsm text displayed on top` """ await self.displayMeme(channel, 'dsm', message) @styrobot.plugincommand('Do It Live!', name='live') async def _live_(self, server, channel, author, message): """ Generate a Do It Live! meme. `!meme live <message>` **Example with top & bottom text:** `!meme live top text:bottom text` **Example with top text:** `!meme live text displayed on top` """ await self.displayMeme(channel, 'live', message) @styrobot.plugincommand('Do You Want Ants?', name='ants') async def _ants_(self, server, channel, author, message): """ Generate a Do You Want Ants? meme. `!meme ants <message>` **Example with top & bottom text:** `!meme ants top text:bottom text` **Example with top text:** `!meme ants text displayed on top` """ await self.displayMeme(channel, 'ants', message) @styrobot.plugincommand('Doge', name='doge') async def _doge_(self, server, channel, author, message): """ Generate a Doge meme. 
`!meme doge <message>` **Example with top & bottom text:** `!meme doge top text:bottom text` **Example with top text:** `!meme doge text displayed on top` """ await self.displayMeme(channel, 'doge', message) @styrobot.plugincommand('Drake Always On Beat', name='alwaysonbeat') async def _alwaysonbeat_(self, server, channel, author, message): """ Generate a Drake Always On Beat meme. `!meme alwaysonbeat <message>` **Example with top & bottom text:** `!meme alwaysonbeat top text:bottom text` **Example with top text:** `!meme alwaysonbeat text displayed on top` """ await self.displayMeme(channel, 'alwaysonbeat', message) @styrobot.plugincommand('Ermahgerd', name='ermg') async def _ermg_(self, server, channel, author, message): """ Generate an Ermahgerd meme. `!meme ermg <message>` **Example with top & bottom text:** `!meme ermg top text:bottom text` **Example with top text:** `!meme ermg text displayed on top` """ await self.displayMeme(channel, 'ermg', message) @styrobot.plugincommand('Facepalm', name='facepalm') async def _facepalm_(self, server, channel, author, message): """ Generate a Facepalm meme. `!meme facepalm <message>` **Example with top & bottom text:** `!meme facepalm top text:bottom text` **Example with top text:** `!meme facepalm text displayed on top` """ await self.displayMeme(channel, 'facepalm', message) @styrobot.plugincommand('First World Problems', name='fwp') async def _fwp_(self, server, channel, author, message): """ Generate a First World Problems meme. `!meme fwp <message>` **Example with top & bottom text:** `!meme fwp top text:bottom text` **Example with top text:** `!meme fwp text displayed on top` """ await self.displayMeme(channel, 'fwp', message) @styrobot.plugincommand('Forever Alone', name='fa') async def _fa_(self, server, channel, author, message): """ Generate a Forever Alone meme. `!meme fa <message>` **Example with top & bottom text:** `!meme fa top text:bottom text` **Example with top text:** `!meme fa text displayed on top` """ await self.displayMeme(channel, 'fa', message) @styrobot.plugincommand('Foul Bachelor Frog', name='fbf') async def _fbf_(self, server, channel, author, message): """ Generate a Foul Bachelor Frog meme. `!meme fbf <message>` **Example with top & bottom text:** `!meme fbf top text:bottom text` **Example with top text:** `!meme fbf text displayed on top` """ await self.displayMeme(channel, 'fbf', message) @styrobot.plugincommand('Fuck Me, Right?', name='fmr') async def _fmr_(self, server, channel, author, message): """ Generate a Fuck Me, Right? meme. `!meme fmr <message>` **Example with top & bottom text:** `!meme fmr top text:bottom text` **Example with top text:** `!meme fmr text displayed on top` """ await self.displayMeme(channel, 'fmr', message) @styrobot.plugincommand('Futurama Fry', name='fry') async def _fry_(self, server, channel, author, message): """ Generate a Futurama Fry meme. `!meme fry <message>` **Example with top & bottom text:** `!meme fry top text:bottom text` **Example with top text:** `!meme fry text displayed on top` """ await self.displayMeme(channel, 'fry', message) @styrobot.plugincommand('Good Guy Greg', name='ggg') async def _ggg_(self, server, channel, author, message): """ Generate a Good Guy Greg meme. 
`!meme ggg <message>` **Example with top & bottom text:** `!meme ggg top text:bottom text` **Example with top text:** `!meme ggg text displayed on top` """ await self.displayMeme(channel, 'ggg', message) @styrobot.plugincommand('Hipster Barista', name='hipster') async def _hipster_(self, server, channel, author, message): """ Generate a Hipster Barista meme. `!meme hipster <message>` **Example with top & bottom text:** `!meme hipster top text:bottom text` **Example with top text:** `!meme hipster text displayed on top` """ await self.displayMeme(channel, 'hipster', message) @styrobot.plugincommand('I Can Has Cheezburger?', name='icanhas') async def _icanhas_(self, server, channel, author, message): """ Generate an I Can Has Cheezburger? meme. `!meme icanhas <message>` **Example with top & bottom text:** `!meme icanhas top text:bottom text` **Example with top text:** `!meme icanhas text displayed on top` """ await self.displayMeme(channel, 'icanhas', message) @styrobot.plugincommand('I Feel Like I\'m Taking Crazy Pills', name='crazypills') async def _crazypills_(self, server, channel, author, message): """ Generate an I Feel Like I'm Taking Crazy Pills meme. `!meme crazypills <message>` **Example with top & bottom text:** `!meme crazypills top text:bottom text` **Example with top text:** `!meme crazypills text displayed on top` """ await self.displayMeme(channel, 'crazypills', message) @styrobot.plugincommand('I Guarantee It', name='mw') async def _mw_(self, server, channel, author, message): """ Generate an I Guarantee It meme. `!meme mw <message>` **Example with top & bottom text:** `!meme mw top text:bottom text` **Example with top text:** `!meme mw text displayed on top` """ await self.displayMeme(channel, 'mw', message) @styrobot.plugincommand('I Have No Idea What I\'m Doing', name='noidea') async def _noidea_(self, server, channel, author, message): """ Generate an I Have No Idea What I'm Doing meme. `!meme noidea <message>` **Example with top & bottom text:** `!meme noidea top text:bottom text` **Example with top text:** `!meme noidea text displayed on top` """ await self.displayMeme(channel, 'noidea', message) @styrobot.plugincommand('I Immediately Regret This Decision!', name='regret') async def _regret_(self, server, channel, author, message): """ Generate an I Immediately Regret This Decision! meme. `!meme regret <message>` **Example with top & bottom text:** `!meme regret top text:bottom text` **Example with top text:** `!meme regret text displayed on top` """ await self.displayMeme(channel, 'regret', message) @styrobot.plugincommand('I Should Buy a Boat Cat', name='boat') async def _boat_(self, server, channel, author, message): """ Generate an I Should Buy a Boat Cat meme. `!meme boat <message>` **Example with top & bottom text:** `!meme boat top text:bottom text` **Example with top text:** `!meme boat text displayed on top` """ await self.displayMeme(channel, 'boat', message) @styrobot.plugincommand('I Should Not Have Said That', name='hagrid') async def _hagrid_(self, server, channel, author, message): """ Generate an I Should Not Have Said That meme. `!meme hagrid <message>` **Example with top & bottom text:** `!meme hagrid top text:bottom text` **Example with top text:** `!meme hagrid text displayed on top` """ await self.displayMeme(channel, 'hagrid', message) @styrobot.plugincommand('I Would Be So Happy', name='sohappy') async def _sohappy_(self, server, channel, author, message): """ Generate an I Would Be So Happy meme. 
`!meme sohappy <message>` **Example with top & bottom text:** `!meme sohappy top text:bottom text` **Example with top text:** `!meme sohappy text displayed on top` """ await self.displayMeme(channel, 'sohappy', message) @styrobot.plugincommand('I am the Captain Now', name='captain') async def _captain_(self, server, channel, author, message): """ Generate an I am the Captain Now meme. `!meme captain <message>` **Example with top & bottom text:** `!meme captain top text:bottom text` **Example with top text:** `!meme captain text displayed on top` """ await self.displayMeme(channel, 'captain', message) @styrobot.plugincommand('Inigo Montoya', name='inigo') async def _inigo_(self, server, channel, author, message): """ Generate an Inigo Montoya meme. `!meme inigo <message>` **Example with top & bottom text:** `!meme inigo top text:bottom text` **Example with top text:** `!meme inigo text displayed on top` """ await self.displayMeme(channel, 'inigo', message) @styrobot.plugincommand('Insanity Wolf', name='iw') async def _iw_(self, server, channel, author, message): """ Generate an Insanity Wolf meme. `!meme iw <message>` **Example with top & bottom text:** `!meme iw top text:bottom text` **Example with top text:** `!meme iw text displayed on top` """ await self.displayMeme(channel, 'iw', message) @styrobot.plugincommand('It\'s A Trap!', name='ackbar') async def _ackbar_(self, server, channel, author, message): """ Generate an It's A Trap! meme. `!meme ackbar <message>` **Example with top & bottom text:** `!meme ackbar top text:bottom text` **Example with top text:** `!meme ackbar text displayed on top` """ await self.displayMeme(channel, 'ackbar', message) @styrobot.plugincommand('It\'s Happening', name='happening') async def _happening_(self, server, channel, author, message): """ Generate an It's Happening meme. `!meme happening <message>` **Example with top & bottom text:** `!meme happening top text:bottom text` **Example with top text:** `!meme happening text displayed on top` """ await self.displayMeme(channel, 'happening', message) @styrobot.plugincommand('It\'s Simple, Kill the Batman', name='joker') async def _joker_(self, server, channel, author, message): """ Generate an It's Simple, Kill the Batman meme. `!meme joker <message>` **Example with top & bottom text:** `!meme joker top text:bottom text` **Example with top text:** `!meme joker text displayed on top` """ await self.displayMeme(channel, 'joker', message) @styrobot.plugincommand('Jony Ive Redesigns Things', name='ive') async def _ive_(self, server, channel, author, message): """ Generate a Jony Ive Redesigns Things meme. `!meme ive <message>` **Example with top & bottom text:** `!meme ive top text:bottom text` **Example with top text:** `!meme ive text displayed on top` """ await self.displayMeme(channel, 'ive', message) @styrobot.plugincommand('Laughing Lizard', name='ll') async def _ll_(self, server, channel, author, message): """ Generate a Laughing Lizard meme. `!meme ll <message>` **Example with top & bottom text:** `!meme ll top text:bottom text` **Example with top text:** `!meme ll text displayed on top` """ await self.displayMeme(channel, 'll', message) @styrobot.plugincommand('Matrix Morpheus', name='morpheus') async def _morpheus_(self, server, channel, author, message): """ Generate a Matrix Morpheus meme. 
`!meme morpheus <message>` **Example with top & bottom text:** `!meme morpheus top text:bottom text` **Example with top text:** `!meme morpheus text displayed on top` """ await self.displayMeme(channel, 'morpheus', message) @styrobot.plugincommand('Milk Was a Bad Choice', name='badchoice') async def _badchoice_(self, server, channel, author, message): """ Generate a Milk Was a Bad Choice meme. `!meme badchoice <message>` **Example with top & bottom text:** `!meme badchoice top text:bottom text` **Example with top text:** `!meme badchoice text displayed on top` """ await self.displayMeme(channel, 'badchoice', message) @styrobot.plugincommand('Minor Mistake Marvin', name='mmm') async def _mmm_(self, server, channel, author, message): """ Generate a Minor Mistake Marvin meme. `!meme mmm <message>` **Example with top & bottom text:** `!meme mmm top text:bottom text` **Example with top text:** `!meme mmm text displayed on top` """ await self.displayMeme(channel, 'mmm', message) @styrobot.plugincommand('Nothing To Do Here', name='jetpack') async def _jetpack_(self, server, channel, author, message): """ Generate a Nothing To Do Here meme. `!meme jetpack <message>` **Example with top & bottom text:** `!meme jetpack top text:bottom text` **Example with top text:** `!meme jetpack text displayed on top` """ await self.displayMeme(channel, 'jetpack', message) @styrobot.plugincommand('Oh, I\'m Sorry, I Thought This Was America', name='imsorry') async def _imsorry_(self, server, channel, author, message): """ Generate an Oh, I'm Sorry, I Thought This Was America meme. `!meme imsorry <message>` **Example with top & bottom text:** `!meme imsorry top text:bottom text` **Example with top text:** `!meme imsorry text displayed on top` """ await self.displayMeme(channel, 'imsorry', message) @styrobot.plugincommand('Oh, Is That What We\'re Going to Do Today?', name='red') async def _red_(self, server, channel, author, message): """ Generate an Oh, Is That What We're Going to Do Today? meme. `!meme red <message>` **Example with top & bottom text:** `!meme red top text:bottom text` **Example with top text:** `!meme red text displayed on top` """ await self.displayMeme(channel, 'red', message) @styrobot.plugincommand('One Does Not Simply Walk into Mordor', name='mordor') async def _mordor_(self, server, channel, author, message): """ Generate an One Does Not Simply Walk into Mordor meme. `!meme mordor <message>` **Example with top & bottom text:** `!meme mordor one does not simply:walk into mordor` **Example with top text:** `!meme mordor one does not simply walk into mordor` """ await self.displayMeme(channel, 'mordor', message) @styrobot.plugincommand('Oprah You Get a Car', name='oprah') async def _oprah_(self, server, channel, author, message): """ Generate an Oprah You Get a Car meme. `!meme oprah <message>` **Example with top & bottom text:** `!meme oprah top text:bottom text` **Example with top text:** `!meme oprah text displayed on top` """ await self.displayMeme(channel, 'oprah', message) @styrobot.plugincommand('Overly Attached Girlfriend', name='oag') async def _oag_(self, server, channel, author, message): """ Generate an Overly Attached Girlfriend meme. 
`!meme oag <message>` **Example with top & bottom text:** `!meme oag top text:bottom text` **Example with top text:** `!meme oag text displayed on top` """ await self.displayMeme(channel, 'oag', message) @styrobot.plugincommand('Pepperidge Farm Remembers', name='remembers') async def _remembers_(self, server, channel, author, message): """ Generate a Pepperidge Farm Remembers meme. `!meme remembers <message>` **Example with top & bottom text:** `!meme remembers top text:bottom text` **Example with top text:** `!meme remembers text displayed on top` """ await self.displayMeme(channel, 'remembers', message) @styrobot.plugincommand('Philosoraptor', name='philosoraptor') async def _philosoraptor_(self, server, channel, author, message): """ Generate a Philosoraptor meme. `!meme philosoraptor <message>` **Example with top & bottom text:** `!meme philosoraptor top text:bottom text` **Example with top text:** `!meme philosoraptor text displayed on top` """ await self.displayMeme(channel, 'philosoraptor', message) @styrobot.plugincommand('Probably Not a Good Idea', name='jw') async def _jw_(self, server, channel, author, message): """ Generate a Probably Not a Good Idea meme. `!meme jw <message>` **Example with top & bottom text:** `!meme jw top text:bottom text` **Example with top text:** `!meme jw text displayed on top` """ await self.displayMeme(channel, 'jw', message) @styrobot.plugincommand('Push it somewhere else Patrick', name='patrick') async def _patrick_(self, server, channel, author, message): """ Generate a Push it somewhere else Patrick meme. `!meme patrick <message>` **Example with top & bottom text:** `!meme patrick top text:bottom text` **Example with top text:** `!meme patrick text displayed on top` """ await self.displayMeme(channel, 'patrick', message) @styrobot.plugincommand('Sad Barack Obama', name='sad-obama') async def _sadobama_(self, server, channel, author, message): """ Generate a Sad Barack Obama meme. `!meme sadobama <message>` **Example with top & bottom text:** `!meme sadobama top text:bottom text` **Example with top text:** `!meme sadobama text displayed on top` """ await self.displayMeme(channel, 'sad-obama', message) @styrobot.plugincommand('Sad Bill Clinton', name='sad-clinton') async def _sadclinton_(self, server, channel, author, message): """ Generate a Sad Bill Clinton meme. `!meme sadclinton <message>` **Example with top & bottom text:** `!meme sadclinton top text:bottom text` **Example with top text:** `!meme sadclinton text displayed on top` """ await self.displayMeme(channel, 'sad-clinton', message) @styrobot.plugincommand('Sad Frog / Feels Bad Man', name='sadfrog') async def _sadfrog_(self, server, channel, author, message): """ Generate a Sad Frog / Feels Bad Man meme. `!meme sadfrog <message>` **Example with top & bottom text:** `!meme sadfrog top text:bottom text` **Example with top text:** `!meme sadfrog text displayed on top` """ await self.displayMeme(channel, 'sadfrog', message) @styrobot.plugincommand('Sad George Bush', name='sad-bush') async def _sadbush_(self, server, channel, author, message): """ Generate a Sad George Bush meme. `!meme sadbush <message>` **Example with top & bottom text:** `!meme sadbush top text:bottom text` **Example with top text:** `!meme sadbush text displayed on top` """ await self.displayMeme(channel, 'sad-bush', message) @styrobot.plugincommand('Sad Joe Biden', name='sad-biden') async def _sadbiden_(self, server, channel, author, message): """ Generate a Sad Joe Biden meme. 
`!meme sadbiden <message>` **Example with top & bottom text:** `!meme sadbiden top text:bottom text` **Example with top text:** `!meme sadbiden text displayed on top` """ await self.displayMeme(channel, 'sad-biden', message) @styrobot.plugincommand('Sad John Boehner', name='sad-boehner') async def _sadboehner_(self, server, channel, author, message): """ Generate a Sad John Boehner meme. `!meme sadboehner <message>` **Example with top & bottom text:** `!meme sadboehner top text:bottom text` **Example with top text:** `!meme sadboehner text displayed on top` """ await self.displayMeme(channel, 'sad-boehner', message) @styrobot.plugincommand('Sarcastic Bear', name='sarcasticbear') async def _sarcasticbear_(self, server, channel, author, message): """ Generate a Sarcastic Bear meme. `!meme sarcasticbear <message>` **Example with top & bottom text:** `!meme sarcasticbear top text:bottom text` **Example with top text:** `!meme sarcasticbear text displayed on top` """ await self.displayMeme(channel, 'sarcasticbear', message) @styrobot.plugincommand('Schrute Facts', name='dwight') async def _dwight_(self, server, channel, author, message): """ Generate a Schrute Facts meme. `!meme dwight <message>` **Example with top & bottom text:** `!meme dwight top text:bottom text` **Example with top text:** `!meme dwight text displayed on top` """ await self.displayMeme(channel, 'dwight', message) @styrobot.plugincommand('Scumbag Brain', name='sb') async def _sb_(self, server, channel, author, message): """ Generate a Scumbag Brain meme. `!meme sb <message>` **Example with top & bottom text:** `!meme sb top text:bottom text` **Example with top text:** `!meme sb text displayed on top` """ await self.displayMeme(channel, 'sb', message) @styrobot.plugincommand('Scumbag Steve', name='ss') async def _ss_(self, server, channel, author, message): """ Generate a Scumbag Steve meme. `!meme ss <message>` **Example with top & bottom text:** `!meme ss top text:bottom text` **Example with top text:** `!meme ss text displayed on top` """ await self.displayMeme(channel, 'ss', message) @styrobot.plugincommand('Sealed Fate', name='sf') async def _sf_(self, server, channel, author, message): """ Generate a Sealed Fate meme. `!meme sf <message>` **Example with top & bottom text:** `!meme sf top text:bottom text` **Example with top text:** `!meme sf text displayed on top` """ await self.displayMeme(channel, 'sf', message) @styrobot.plugincommand('See? Nobody Cares', name='dodgson') async def _dodgson_(self, server, channel, author, message): """ Generate a See? Nobody Cares meme. `!meme dodgson <message>` **Example with top & bottom text:** `!meme dodgson top text:bottom text` **Example with top text:** `!meme dodgson text displayed on top` """ await self.displayMeme(channel, 'dodgson', message) @styrobot.plugincommand('Shut Up and Take My Money!', name='money') async def _money_(self, server, channel, author, message): """ Generate a Shut Up and Take My Money! meme. `!meme money <message>` **Example with top & bottom text:** `!meme money top text:bottom text` **Example with top text:** `!meme money text displayed on top` """ await self.displayMeme(channel, 'money', message) @styrobot.plugincommand('So Hot Right Now', name='sohot') async def _sohot_(self, server, channel, author, message): """ Generate a So Hot Right Now meme. 
`!meme sohot <message>` **Example with top & bottom text:** `!meme sohot top text:bottom text` **Example with top text:** `!meme sohot text displayed on top` """ await self.displayMeme(channel, 'sohot', message) @styrobot.plugincommand('So I Got That Goin\' For Me, Which is Nice', name='goinforme') async def _goinforme_(self, server, channel, author, message): """ Generate a So I Got That Goin' For Me, Which is Nice meme. `!meme goinforme <message>` **Example with top & bottom text:** `!meme goinforme top text:bottom text` **Example with top text:** `!meme goinforme text displayed on top` """ await self.displayMeme(channel, 'goinforme', message) @styrobot.plugincommand('Socially Awesome Awkward Penguin', name='awesome-awkward') async def _awesomeawkward_(self, server, channel, author, message): """ Generate a Socially Awesome Awkward Penguin meme. `!meme awesomeawkward <message>` **Example with top & bottom text:** `!meme awesomeawkward top text:bottom text` **Example with top text:** `!meme awesomeawkward text displayed on top` """ await self.displayMeme(channel, 'awesome-awkward', message) @styrobot.plugincommand('Socially Awesome Penguin', name='awesome') async def _awesome_(self, server, channel, author, message): """ Generate a Socially Awesome Penguin meme. `!meme awesome <message>` **Example with top & bottom text:** `!meme awesome top text:bottom text` **Example with top text:** `!meme awesome text displayed on top` """ await self.displayMeme(channel, 'awesome', message) @styrobot.plugincommand('Socially Awkward Awesome Penguin', name='awkward-awesome') async def _awkwardawesome_(self, server, channel, author, message): """ Generate a Socially Awkward Awesome Penguin meme. `!meme awkwardawesome <message>` **Example with top & bottom text:** `!meme awkwardawesome top text:bottom text` **Example with top text:** `!meme awkwardawesome text displayed on top` """ await self.displayMeme(channel, 'awkward-awesome', message) @styrobot.plugincommand('Socially Awkward Penguin', name='awkward') async def _awkward_(self, server, channel, author, message): """ Generate a Socially Awkward Penguin meme. `!meme awkward <message>` **Example with top & bottom text:** `!meme awkward top text:bottom text` **Example with top text:** `!meme awkward text displayed on top` """ await self.displayMeme(channel, 'awkward', message) @styrobot.plugincommand('Stop Trying to Make Fetch Happen', name='fetch') async def _fetch_(self, server, channel, author, message): """ Generate a Stop Trying to Make Fetch Happen meme. `!meme fetch <message>` **Example with top & bottom text:** `!meme fetch top text:bottom text` **Example with top text:** `!meme fetch text displayed on top` """ await self.displayMeme(channel, 'fetch', message) @styrobot.plugincommand('Success Kid', name='success') async def _success_(self, server, channel, author, message): """ Generate a Success Kid meme. `!meme success <message>` **Example with top & bottom text:** `!meme success top text:bottom text` **Example with top text:** `!meme success text displayed on top` """ await self.displayMeme(channel, 'success', message) @styrobot.plugincommand('Super Cool Ski Instructor', name='ski') async def _ski_(self, server, channel, author, message): """ Generate a Super Cool Ski Instructor meme. 
`!meme ski <message>` **Example with top & bottom text:** `!meme ski top text:bottom text` **Example with top text:** `!meme ski text displayed on top` """ await self.displayMeme(channel, 'ski', message) @styrobot.plugincommand('That Would Be Great', name='officespace') async def _officespace_(self, server, channel, author, message): """ Generate a That Would Be Great meme. `!meme officespace <message>` **Example with top & bottom text:** `!meme officespace top text:bottom text` **Example with top text:** `!meme officespace text displayed on top` """ await self.displayMeme(channel, 'officespace', message) @styrobot.plugincommand('The Most Interesting Man in the World', name='interesting') async def _interesting_(self, server, channel, author, message): """ Generate a The Most Interesting Man in the World meme. `!meme interesting <message>` **Example with top & bottom text:** `!meme interesting top text:bottom text` **Example with top text:** `!meme interesting text displayed on top` """ await self.displayMeme(channel, 'interesting', message) @styrobot.plugincommand('The Rent Is Too Damn High', name='toohigh') async def _toohigh_(self, server, channel, author, message): """ Generate a The Rent Is Too Damn High meme. `!meme toohigh <message>` **Example with top & bottom text:** `!meme toohigh top text:bottom text` **Example with top text:** `!meme toohigh text displayed on top` """ await self.displayMeme(channel, 'toohigh', message) @styrobot.plugincommand('This is Bull, Shark', name='bs') async def _bs_(self, server, channel, author, message): """ Generate a This is Bull, Shark meme. `!meme bs <message>` **Example with top & bottom text:** `!meme bs top text:bottom text` **Example with top text:** `!meme bs text displayed on top` """ await self.displayMeme(channel, 'bs', message) @styrobot.plugincommand('What is this, a Center for Ants?!', name='center') async def _center_(self, server, channel, author, message): """ Generate a What is this, a Center for Ants?! meme. `!meme center <message>` **Example with top & bottom text:** `!meme center top text:bottom text` **Example with top text:** `!meme center text displayed on top` """ await self.displayMeme(channel, 'center', message) @styrobot.plugincommand('Why Not Both?', name='both') async def _both_(self, server, channel, author, message): """ Generate a Why Not Both? meme. `!meme both <message>` **Example with top & bottom text:** `!meme both top text:bottom text` **Example with top text:** `!meme both text displayed on top` """ await self.displayMeme(channel, 'both', message) @styrobot.plugincommand('Winter is coming', name='winter') async def _winter_(self, server, channel, author, message): """ Generate a Winter is coming meme. `!meme winter <message>` **Example with top & bottom text:** `!meme winter top text:bottom text` **Example with top text:** `!meme winter text displayed on top` """ await self.displayMeme(channel, 'winter', message) @styrobot.plugincommand('X all the Y', name='xy') async def _xy_(self, server, channel, author, message): """ Generate a X all the Y meme. `!meme xy <message>` **Example with top & bottom text:** `!meme xy top text:bottom text` **Example with top text:** `!meme xy text displayed on top` """ await self.displayMeme(channel, 'xy', message) @styrobot.plugincommand('X, X Everywhere', name='buzz') async def _buzz_(self, server, channel, author, message): """ Generate a X, X Everywhere meme. 
`!meme buzz <message>` **Example with top & bottom text:** `!meme buzz top text:bottom text` **Example with top text:** `!meme buzz text displayed on top` """ await self.displayMeme(channel, 'buzz', message) @styrobot.plugincommand('Xzibit Yo Dawg', name='yodawg') async def _yodawg_(self, server, channel, author, message): """ Generate a Xzibit Yo Dawg meme. `!meme yodawg <message>` **Example with top & bottom text:** `!meme yodawg top text:bottom text` **Example with top text:** `!meme yodawg text displayed on top` """ await self.displayMeme(channel, 'yodawg', message) @styrobot.plugincommand('Y U NO Guy', name='yuno') async def _yuno_(self, server, channel, author, message): """ Generate a Y U NO Guy meme. `!meme yuno <message>` **Example with top & bottom text:** `!meme yuno top text:bottom text` **Example with top text:** `!meme yuno text displayed on top` """ await self.displayMeme(channel, 'yuno', message) @styrobot.plugincommand('Y\'all Got Any More of Them', name='yallgot') async def _yallgot_(self, server, channel, author, message): """ Generate a Y'all Got Any More of Them meme. `!meme yallgot <message>` **Example with top & bottom text:** `!meme yallgot top text:bottom text` **Example with top text:** `!meme yallgot text displayed on top` """ await self.displayMeme(channel, 'yallgot', message) @styrobot.plugincommand('You Should Feel Bad', name='bad') async def _bad_(self, server, channel, author, message): """ Generate a You Should Feel Bad meme. `!meme bad <message>` **Example with top & bottom text:** `!meme bad top text:bottom text` **Example with top text:** `!meme bad text displayed on top` """ await self.displayMeme(channel, 'bad', message) @styrobot.plugincommand('You Sit on a Throne of Lies', name='elf') async def _elf_(self, server, channel, author, message): """ Generate a You Sit on a Throne of Lies meme. `!meme elf <message>` **Example with top & bottom text:** `!meme elf top text:bottom text` **Example with top text:** `!meme elf text displayed on top` """ await self.displayMeme(channel, 'elf', message) @styrobot.plugincommand('You Were the Chosen One!', name='chosen') async def _chosen_(self, server, channel, author, message): """ Generate a You Were the Chosen One! meme. `!meme chosen <message>` **Example with top & bottom text:** `!meme chosen top text:bottom text` **Example with top text:** `!meme chosen text displayed on top` """ await self.displayMeme(channel, 'chosen', message) def parseMeme(self, message): message = message.replace("?", "") if message.find(':') != -1: top, bottom = message.split(':') return [top, bottom] else: return [message, ''] async def displayMeme(self, channel, type, message): message = self.parseMeme(message) if not message[0]: await self.bot.send_message(channel, 'That is not a valid meme string') self.logger.debug('[%s]: Invalid meme string: %s | %s', type, message[0], message[1]) return request = urllib.request.Request( 'https://memegen.link/{}/{}/{}.jpg'.format(type, message[0], message[1]), data = None, headers = { 'User-Agent':'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Ubuntu/12.04 Chromium/18.0.1025.168 Chrome/18.0.1025.168 Safari/535.19' } ) self.logger.debug('Attempting to do API call to memegen') f = None try: f = urllib.request.urlopen(request) except urllib.error.HTTPError as ex: await self.bot.send_message(channel, 'Something seems to have gone wrong while creating the meme, please try again. 
If it continues to fail, try waiting a few minutes.') self.logger.debug('API call to memegen has failed with error: %s', ex) return self.logger.debug('API call to memegen has succeeded') filename = 'images/' + str(uuid.uuid1()) + '.jpg' output = open(filename, 'wb') output.write(f.read()) output.close() self.logger.debug('[%s]: %s | %s', type, message[0], message[1]) await self.bot.send_file(channel, filename) try: os.remove(filename) self.logger.debug('Deleted meme %s', filename) except OSError: self.logger.debug('Meme file %s does not exist', filename)
StyrofoamLaser/StyroBotPy
styrobot/plugins/meme.py
Python
mit
44,182
[ "Brian" ]
22ff54d8f9eda9a74aebad8f0f5e77889525c48369f9e245548e4190de4c2371
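All of the meme commands above delegate to parseMeme/displayMeme: question marks are stripped, a colon splits the message into top and bottom captions, and the two captions are substituted into a memegen.link image URL before the downloaded file is sent to the channel. A standalone sketch of just the caption-splitting step (this variant passes maxsplit=1 so extra colons fall into the bottom caption, whereas the plugin's own split assumes at most one colon; the network call is omitted):

def parse_meme(message):
    # mirror Meme.parseMeme: drop '?' and split top/bottom on the first ':'
    message = message.replace("?", "")
    if ':' in message:
        top, bottom = message.split(':', 1)
        return [top, bottom]
    return [message, '']

print(parse_meme("one does not simply:walk into mordor"))   # ['one does not simply', 'walk into mordor']
print(parse_meme("text displayed on top"))                  # ['text displayed on top', '']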
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import argparse import os import libcst as cst import pathlib import sys from typing import (Any, Callable, Dict, List, Sequence, Tuple) def partition( predicate: Callable[[Any], bool], iterator: Sequence[Any] ) -> Tuple[List[Any], List[Any]]: """A stable, out-of-place partition.""" results = ([], []) for i in iterator: results[int(predicate(i))].append(i) # Returns trueList, falseList return results[1], results[0] class recommendationengineCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'collect_user_event': ('parent', 'user_event', 'uri', 'ets', ), 'create_catalog_item': ('parent', 'catalog_item', ), 'create_prediction_api_key_registration': ('parent', 'prediction_api_key_registration', ), 'delete_catalog_item': ('name', ), 'delete_prediction_api_key_registration': ('name', ), 'get_catalog_item': ('name', ), 'import_catalog_items': ('parent', 'input_config', 'request_id', 'errors_config', ), 'import_user_events': ('parent', 'input_config', 'request_id', 'errors_config', ), 'list_catalog_items': ('parent', 'page_size', 'page_token', 'filter', ), 'list_prediction_api_key_registrations': ('parent', 'page_size', 'page_token', ), 'list_user_events': ('parent', 'page_size', 'page_token', 'filter', ), 'predict': ('name', 'user_event', 'page_size', 'page_token', 'filter', 'dry_run', 'params', 'labels', ), 'purge_user_events': ('parent', 'filter', 'force', ), 'update_catalog_item': ('name', 'catalog_item', 'update_mask', ), 'write_user_event': ('parent', 'user_event', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: try: key = original.func.attr.value kword_params = self.METHOD_TO_PARAMS[key] except (AttributeError, KeyError): # Either not a method from the API or too convoluted to be sure. return updated # If the existing code is valid, keyword args come after positional args. # Therefore, all positional args must map to the first parameters. args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) if any(k.keyword.value == "request" for k in kwargs): # We've already fixed this file, don't fix it again. return updated kwargs, ctrl_kwargs = partition( lambda a: a.keyword.value not in self.CTRL_PARAMS, kwargs ) args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) request_arg = cst.Arg( value=cst.Dict([ cst.DictElement( cst.SimpleString("'{}'".format(name)), cst.Element(value=arg.value) ) # Note: the args + kwargs looks silly, but keep in mind that # the control parameters had to be stripped out, and that # those could have been passed positionally or by keyword. 
for name, arg in zip(kword_params, args + kwargs)]), keyword=cst.Name("request") ) return updated.with_changes( args=[request_arg] + ctrl_kwargs ) def fix_files( in_dir: pathlib.Path, out_dir: pathlib.Path, *, transformer=recommendationengineCallTransformer(), ): """Duplicate the input dir to the output dir, fixing file method calls. Preconditions: * in_dir is a real directory * out_dir is a real, empty directory """ pyfile_gen = ( pathlib.Path(os.path.join(root, f)) for root, _, files in os.walk(in_dir) for f in files if os.path.splitext(f)[1] == ".py" ) for fpath in pyfile_gen: with open(fpath, 'r') as f: src = f.read() # Parse the code and insert method call fixes. tree = cst.parse_module(src) updated = tree.visit(transformer) # Create the path and directory structure for the new file. updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) updated_path.parent.mkdir(parents=True, exist_ok=True) # Generate the updated source file at the corresponding path. with open(updated_path, 'w') as f: f.write(updated.code) if __name__ == '__main__': parser = argparse.ArgumentParser( description="""Fix up source that uses the recommendationengine client library. The existing sources are NOT overwritten but are copied to output_dir with changes made. Note: This tool operates at a best-effort level at converting positional parameters in client method calls to keyword based parameters. Cases where it WILL FAIL include A) * or ** expansion in a method call. B) Calls via function or method alias (includes free function calls) C) Indirect or dispatched calls (e.g. the method is looked up dynamically) These all constitute false negatives. The tool will also detect false positives when an API method shares a name with another method. """) parser.add_argument( '-d', '--input-directory', required=True, dest='input_dir', help='the input directory to walk for python files to fix up', ) parser.add_argument( '-o', '--output-directory', required=True, dest='output_dir', help='the directory to output files fixed via un-flattening', ) args = parser.parse_args() input_dir = pathlib.Path(args.input_dir) output_dir = pathlib.Path(args.output_dir) if not input_dir.is_dir(): print( f"input directory '{input_dir}' does not exist or is not a directory", file=sys.stderr, ) sys.exit(-1) if not output_dir.is_dir(): print( f"output directory '{output_dir}' does not exist or is not a directory", file=sys.stderr, ) sys.exit(-1) if os.listdir(output_dir): print( f"output directory '{output_dir}' is not empty", file=sys.stderr, ) sys.exit(-1) fix_files(input_dir, output_dir)
googleapis/python-recommendations-ai
scripts/fixup_recommendationengine_v1beta1_keywords.py
Python
apache-2.0
7,056
[ "VisIt" ]
7699788e72855192fdf2b35c838cc5fd8eb7b79f605a56ecd25bbaf69536f449
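The fixup script above rewrites calls against the generated recommendationengine client so that the old positional and keyword arguments are folded into a single request dict, while retry/timeout/metadata remain ordinary control keywords. A rough, hedged illustration of the transformer applied to one call; it assumes libcst is installed and that the script happens to be importable as a module (the import path below is hypothetical), and the exact whitespace of the rewritten call may differ:

import libcst as cst
# hypothetical import path; the transformer class is defined in the script above
from fixup_recommendationengine_v1beta1_keywords import recommendationengineCallTransformer

src = 'client.list_catalog_items("catalogs/default_catalog", page_size=10, timeout=30)\n'
updated = cst.parse_module(src).visit(recommendationengineCallTransformer())
print(updated.code)
# expected shape of the output, per METHOD_TO_PARAMS['list_catalog_items']:
#   client.list_catalog_items(request={'parent': "catalogs/default_catalog", 'page_size': 10}, timeout=30)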
from setuptools import setup setup( name = "electronfactors", version = "0.1.5", author = "Simon Biggs", author_email = "mail@simonbiggs.net", description = "Predict electron insert factors", long_description = """Predict electron insert factors to an uncertainty approaching measurement using the measurements you have already taken. You can start on a given energy/applicator/SSD combination if you have at least eight data points. For more information visit the github repository, https://github.com/SimonBiggs/electronfactors This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.""", keywords = [], packages = [ "electronfactors", "electronfactors.ellipse", "electronfactors.inputs", "electronfactors.measurement", "electronfactors.model", "electronfactors.reports", "electronfactors.visuals", ], license='AGPL3+', classifiers = [], url = "https://github.com/SimonBiggs/electronfactors" )
SimonBiggs/electronfactors
setup.py
Python
agpl-3.0
1,178
[ "VisIt" ]
898302884262e9be2ac1e9f471e9348d39c103a0930077927fa270b7af5daaea
# Copyright (C) 2012,2013 # Max Planck Institute for Polymer Research # Copyright (C) 2008,2009,2010,2011 # Max-Planck-Institute for Polymer Research & Fraunhofer SCAI # # This file is part of ESPResSo++. # # ESPResSo++ is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo++ is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. r""" ******************************* espressopp.esutil.NormalVariate ******************************* .. function:: espressopp.esutil.NormalVariate(mean, sigma) :param mean: (default: 0.0) :param sigma: (default: 1.0) :type mean: real :type sigma: real """ from espressopp import pmi from espressopp.esutil import cxxinit from _espressopp import esutil_NormalVariate class NormalVariateLocal(esutil_NormalVariate): def __init__(self, mean=0.0, sigma=1.0): if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup(): cxxinit(self, esutil_NormalVariate, mean, sigma) if pmi.isController: class NormalVariate(object): __metaclass__ = pmi.Proxy """A random normal variate.""" pmiproxydefs = dict( cls = 'espressopp.esutil.NormalVariateLocal', localcall = [ '__call__' ], )
govarguz/espressopp
src/esutil/NormalVariate.py
Python
gpl-3.0
1,741
[ "ESPResSo" ]
a5222ef920a1ca2a86dca98631f911205cd60e6873eab60a2290eddddf6f7de0
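NormalVariateLocal simply wraps the C++ esutil_NormalVariate, and the controller-side proxy exposes only __call__ as a local call, so drawing samples is a plain call on the instance. A hedged usage sketch, assuming a working ESPResSo++ installation; the sampling loop itself is illustrative rather than taken from the package documentation:

import espressopp

# mean and sigma match the constructor documented in the module docstring above
gauss = espressopp.esutil.NormalVariate(mean=0.0, sigma=1.0)

# __call__ is listed under localcall, so each call should return one
# normally distributed sample on the controller
samples = [gauss() for _ in range(5)]
print(samples)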
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import numpy as np import warnings import functools from monty.json import MSONable from pymatgen.electronic_structure.core import Spin, Orbital from pymatgen.core.periodic_table import get_el_sp from pymatgen.core.structure import Structure from pymatgen.core.spectrum import Spectrum from pymatgen.util.coord import get_linear_interpolated_value from scipy.constants.codata import value as _cd """ This module defines classes to represent the density of states, etc. """ __author__ = "Shyue Ping Ong" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "2.0" __maintainer__ = "Shyue Ping Ong" __email__ = "shyuep@gmail.com" __date__ = "Mar 20, 2012" class DOS(Spectrum): """ Replacement basic DOS object. All other DOS objects are extended versions of this object. Work in progress. Args: energies: A sequence of energies densities (ndarray): Either a Nx1 or a Nx2 array. If former, it is interpreted as a Spin.up only density. Otherwise, the first column is interpreted as Spin.up and the other is Spin.down. efermi: Fermi level energy. .. attribute: energies The sequence of energies .. attribute: densities A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]} .. attribute: efermi Fermi level """ XLABEL = "Energy" YLABEL = "Density" def __init__(self, energies, densities, efermi): super(DOS, self).__init__(energies, densities, efermi) self.efermi = efermi def get_interpolated_gap(self, tol=0.001, abs_tol=False, spin=None): """ Expects a DOS object and finds the gap Args: tol: tolerance in occupations for determining the gap abs_tol: Set to True for an absolute tolerance and False for a relative one. spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. Returns: (gap, cbm, vbm): Tuple of floats in eV corresponding to the gap, cbm and vbm. """ tdos = self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1) if not abs_tol: tol = tol * tdos.sum() / tdos.shape[0] energies = self.x below_fermi = [i for i in range(len(energies)) if energies[i] < self.efermi and tdos[i] > tol] above_fermi = [i for i in range(len(energies)) if energies[i] > self.efermi and tdos[i] > tol] vbm_start = max(below_fermi) cbm_start = min(above_fermi) if vbm_start == cbm_start: return 0.0, self.efermi, self.efermi else: # Interpolate between adjacent values terminal_dens = tdos[vbm_start:vbm_start + 2][::-1] terminal_energies = energies[vbm_start:vbm_start + 2][::-1] start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol) terminal_dens = tdos[cbm_start - 1:cbm_start + 1] terminal_energies = energies[cbm_start - 1:cbm_start + 1] end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol) return end - start, end, start def get_cbm_vbm(self, tol=0.001, abs_tol=False, spin=None): """ Expects a DOS object and finds the cbm and vbm. Args: tol: tolerance in occupations for determining the gap abs_tol: An absolute tolerance (True) and a relative one (False) spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. 
Returns: (cbm, vbm): float in eV corresponding to the gap """ # determine tolerance if spin is None: tdos = self.y if len(self.ydim) == 1 else np.sum(self.y, axis=1) elif spin == Spin.up: tdos = self.y[:, 0] else: tdos = self.y[:, 1] if not abs_tol: tol = tol * tdos.sum() / tdos.shape[0] # find index of fermi energy i_fermi = 0 while self.x[i_fermi] <= self.efermi: i_fermi += 1 # work backwards until tolerance is reached i_gap_start = i_fermi while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol: i_gap_start -= 1 # work forwards until tolerance is reached i_gap_end = i_gap_start while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol: i_gap_end += 1 i_gap_end -= 1 return self.x[i_gap_end], self.x[i_gap_start] def get_gap(self, tol=0.001, abs_tol=False, spin=None): """ Expects a DOS object and finds the gap. Args: tol: tolerance in occupations for determining the gap abs_tol: An absolute tolerance (True) and a relative one (False) spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. Returns: gap in eV """ (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin) return max(cbm - vbm, 0.0) def __str__(self): """ Returns a string which can be easily plotted (using gnuplot). """ if Spin.down in self.densities: stringarray = ["#{:30s} {:30s} {:30s}".format("Energy", "DensityUp", "DensityDown")] for i, energy in enumerate(self.energies): stringarray.append("{:.5f} {:.5f} {:.5f}" .format(energy, self.densities[Spin.up][i], self.densities[Spin.down][i])) else: stringarray = ["#{:30s} {:30s}".format("Energy", "DensityUp")] for i, energy in enumerate(self.energies): stringarray.append("{:.5f} {:.5f}" .format(energy, self.densities[Spin.up][i])) return "\n".join(stringarray) class Dos(MSONable): """ Basic DOS object. All other DOS objects are extended versions of this object. Args: efermi: Fermi level energy energies: A sequences of energies densities ({Spin: np.array}): representing the density of states for each Spin. .. attribute: energies The sequence of energies .. attribute: densities A dict of spin densities, e.g., {Spin.up: [...], Spin.down: [...]} .. attribute: efermi Fermi level """ def __init__(self, efermi, energies, densities): self.efermi = efermi self.energies = np.array(energies) self.densities = {k: np.array(d) for k, d in densities.items()} def get_densities(self, spin=None): """ Returns the density of states for a particular spin. Args: spin: Spin Returns: Returns the density of states for a particular spin. If Spin is None, the sum of all spins is returned. """ if self.densities is None: result = None elif spin is None: if Spin.down in self.densities: result = self.densities[Spin.up] + self.densities[Spin.down] else: result = self.densities[Spin.up] else: result = self.densities[spin] return result def get_smeared_densities(self, sigma): """ Returns the Dict representation of the densities, {Spin: densities}, but with a Gaussian smearing of std dev sigma applied about the fermi level. Args: sigma: Std dev of Gaussian smearing function. Returns: Dict of Gaussian-smeared densities. """ from scipy.ndimage.filters import gaussian_filter1d smeared_dens = {} diff = [self.energies[i + 1] - self.energies[i] for i in range(len(self.energies) - 1)] avgdiff = sum(diff) / len(diff) for spin, dens in self.densities.items(): smeared_dens[spin] = gaussian_filter1d(dens, sigma / avgdiff) return smeared_dens def __add__(self, other): """ Adds two DOS together. 
Checks that energy scales are the same. Otherwise, a ValueError is thrown. Args: other: Another DOS object. Returns: Sum of the two DOSs. """ if not all(np.equal(self.energies, other.energies)): raise ValueError("Energies of both DOS are not compatible!") densities = {spin: self.densities[spin] + other.densities[spin] for spin in self.densities.keys()} return Dos(self.efermi, self.energies, densities) def get_interpolated_value(self, energy): """ Returns interpolated density for a particular energy. Args: energy: Energy to return the density for. """ f = {} for spin in self.densities.keys(): f[spin] = get_linear_interpolated_value(self.energies, self.densities[spin], energy) return f def get_interpolated_gap(self, tol=0.001, abs_tol=False, spin=None): """ Expects a DOS object and finds the gap Args: tol: tolerance in occupations for determining the gap abs_tol: Set to True for an absolute tolerance and False for a relative one. spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. Returns: (gap, cbm, vbm): Tuple of floats in eV corresponding to the gap, cbm and vbm. """ tdos = self.get_densities(spin) if not abs_tol: tol = tol * tdos.sum() / tdos.shape[0] energies = self.energies below_fermi = [i for i in range(len(energies)) if energies[i] < self.efermi and tdos[i] > tol] above_fermi = [i for i in range(len(energies)) if energies[i] > self.efermi and tdos[i] > tol] vbm_start = max(below_fermi) cbm_start = min(above_fermi) if vbm_start == cbm_start: return 0.0, self.efermi, self.efermi else: # Interpolate between adjacent values terminal_dens = tdos[vbm_start:vbm_start + 2][::-1] terminal_energies = energies[vbm_start:vbm_start + 2][::-1] start = get_linear_interpolated_value(terminal_dens, terminal_energies, tol) terminal_dens = tdos[cbm_start - 1:cbm_start + 1] terminal_energies = energies[cbm_start - 1:cbm_start + 1] end = get_linear_interpolated_value(terminal_dens, terminal_energies, tol) return end - start, end, start def get_cbm_vbm(self, tol=0.001, abs_tol=False, spin=None): """ Expects a DOS object and finds the cbm and vbm. Args: tol: tolerance in occupations for determining the gap abs_tol: An absolute tolerance (True) and a relative one (False) spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. Returns: (cbm, vbm): float in eV corresponding to the gap """ # determine tolerance tdos = self.get_densities(spin) if not abs_tol: tol = tol * tdos.sum() / tdos.shape[0] # find index of fermi energy i_fermi = 0 while self.energies[i_fermi] <= self.efermi: i_fermi += 1 # work backwards until tolerance is reached i_gap_start = i_fermi while i_gap_start - 1 >= 0 and tdos[i_gap_start - 1] <= tol: i_gap_start -= 1 # work forwards until tolerance is reached i_gap_end = i_gap_start while i_gap_end < tdos.shape[0] and tdos[i_gap_end] <= tol: i_gap_end += 1 i_gap_end -= 1 return self.energies[i_gap_end], self.energies[i_gap_start] def get_gap(self, tol=0.001, abs_tol=False, spin=None): """ Expects a DOS object and finds the gap. Args: tol: tolerance in occupations for determining the gap abs_tol: An absolute tolerance (True) and a relative one (False) spin: Possible values are None - finds the gap in the summed densities, Up - finds the gap in the up spin channel, Down - finds the gap in the down spin channel. 
Returns: gap in eV """ (cbm, vbm) = self.get_cbm_vbm(tol, abs_tol, spin) return max(cbm - vbm, 0.0) def __str__(self): """ Returns a string which can be easily plotted (using gnuplot). """ if Spin.down in self.densities: stringarray = ["#{:30s} {:30s} {:30s}".format("Energy", "DensityUp", "DensityDown")] for i, energy in enumerate(self.energies): stringarray.append("{:.5f} {:.5f} {:.5f}" .format(energy, self.densities[Spin.up][i], self.densities[Spin.down][i])) else: stringarray = ["#{:30s} {:30s}".format("Energy", "DensityUp")] for i, energy in enumerate(self.energies): stringarray.append("{:.5f} {:.5f}" .format(energy, self.densities[Spin.up][i])) return "\n".join(stringarray) @classmethod def from_dict(cls, d): """ Returns Dos object from dict representation of Dos. """ return Dos(d["efermi"], d["energies"], {Spin(int(k)): v for k, v in d["densities"].items()}) def as_dict(self): """ Json-serializable dict representation of Dos. """ return {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "efermi": self.efermi, "energies": list(self.energies), "densities": {str(spin): list(dens) for spin, dens in self.densities.items()}} class FermiDos(Dos): """ This wrapper class helps relates the density of states, doping levels (i.e. carrier concentrations) and corresponding fermi levels. A negative doping concentration (c) means that the majority carriers are electrons (n-type doping) and positive c represents holes or p-type doping. Args: dos (pymatgen Dos class): density of states at corresponding energy levels structure (pymatgen Structure class): provided either as input or inside Dos (e.g. if CompleteDos used) nelecs (float): the number of electrons included in the energy range of dos. It is used for normalizing the densities. Default is the total number of electrons in the structure. bandgap (float): if set, the energy values are scissored so that the electronic band gap matches this value. """ def __init__(self, dos, structure=None, nelecs=None, bandgap=None): super(FermiDos, self).__init__( dos.efermi, energies=dos.energies, densities={k: np.array(d) for k, d in dos.densities.items()}) if structure is None: try: self.structure = dos.structure except: raise ValueError('"structure" not provided!') else: self.structure = structure self.volume = self.structure.volume self.energies = np.array(dos.energies) self.de = np.hstack((self.energies[1:], self.energies[-1])) - self.energies tdos = np.array(self.get_densities()) nelecs = nelecs or self.structure.composition.total_electrons # normalize total density of states based on integral at 0K self.tdos = tdos * nelecs / (tdos * self.de)[self.energies <= self.efermi].sum() ecbm, evbm = self.get_cbm_vbm() self.idx_vbm = np.argmin(abs(self.energies - evbm)) self.idx_cbm = np.argmin(abs(self.energies - ecbm)) self.A_to_cm = 1e-8 if bandgap: if self.efermi < ecbm and self.efermi > evbm: eref = self.efermi else: eref = (evbm + ecbm) / 2.0 idx_fermi = np.argmin(abs(self.energies - eref)) self.energies[:idx_fermi] -= (bandgap - (ecbm - evbm)) / 2.0 self.energies[idx_fermi:] += (bandgap - (ecbm - evbm)) / 2.0 def get_doping(self, fermi, T): """ Calculate the doping (majority carrier concentration) at a given fermi level and temperature. A simple Left Riemann sum is used for integrating the density of states over energy & equilibrium Fermi-Dirac distribution Args: fermi (float): the fermi level in eV T (float): the temperature in Kelvin Returns (float): in units 1/cm3. 
If negative it means that the majority carriers are electrons (n-type doping) and if positive holes/p-type """ cb_integral = np.sum(self.tdos[self.idx_cbm:] * f0(self.energies[self.idx_cbm:], fermi, T) * self.de[self.idx_cbm:], axis=0) vb_integral = np.sum(self.tdos[:self.idx_vbm + 1] * (1 - f0(self.energies[:self.idx_vbm + 1], fermi, T)) * self.de[:self.idx_vbm + 1], axis=0) return (vb_integral - cb_integral) / (self.volume * self.A_to_cm ** 3) def get_fermi_interextrapolated(self, c, T, warn=True, c_ref=1e10, **kwargs): """ Similar to get_fermi except that when get_fermi fails to converge, an interpolated or extrapolated fermi (depending on c) is returned with the assumption that the fermi level changes linearly with log(abs(c)). Args: c (float): doping concentration in 1/cm3. c<0 represents n-type doping and c>0 p-type doping (i.e. majority carriers are holes) T (float): absolute temperature in Kelvin warn (bool): whether to warn for the first time when no fermi can be found. c_ref (float): a doping concentration where get_fermi returns a value without error for both c_ref and -c_ref **kwargs: see keyword arguments of the get_fermi function Returns (float): the fermi level that is possibly interapolated or extrapolated and must be used with caution. """ try: return self.get_fermi(c, T, **kwargs) except ValueError as e: if warn: warnings.warn(str(e)) if abs(c) < c_ref: if abs(c) < 1e-10: c = 1e-10 # max(10, ) is to avoid log(0<x<1) and log(1+x) both of which are slow f2 = self.get_fermi_interextrapolated(max(10, abs(c) * 10.), T, warn=False, **kwargs) f1 = self.get_fermi_interextrapolated(-max(10, abs(c) * 10.), T, warn=False, **kwargs) c2 = np.log(abs(1 + self.get_doping(f2, T))) c1 = -np.log(abs(1 + self.get_doping(f1, T))) slope = (f2 - f1) / (c2 - c1) return f2 + slope * (np.sign(c) * np.log(abs(1 + c)) - c2) else: f_ref = self.get_fermi_interextrapolated(np.sign(c) * c_ref, T, warn=False, **kwargs) f_new = self.get_fermi_interextrapolated(c / 10., T, warn=False, **kwargs) clog = np.sign(c) * np.log(abs(c)) c_newlog = np.sign(c) * np.log(abs(self.get_doping(f_new, T))) slope = (f_new - f_ref) / (c_newlog - np.sign(c) * 10.) return f_new + slope * (clog - c_newlog) def get_fermi(self, c, T, rtol=0.01, nstep=50, step=0.1, precision=8): """ Finds the fermi level at which the doping concentration at the given temperature (T) is equal to c. A greedy algorithm is used where the relative error is minimized by calculating the doping at a grid which is continuously become finer. Args: c (float): doping concentration. c<0 represents n-type doping and c>0 represents p-type doping (i.e. majority carriers are holes) T (float): absolute temperature in Kelvin rtol (float): maximum acceptable relative error nstep (int): number of steps checked around a given fermi level step (float): initial step in fermi level when searching precision (int): essentially the decimal places of calculated fermi Returns (float): the fermi level. Note that this is different from the default dos.efermi. """ fermi = self.efermi # initialize target fermi for _ in range(precision): frange = np.arange(-nstep, nstep + 1) * step + fermi calc_doping = np.array([self.get_doping(f, T) for f in frange]) relative_error = abs(calc_doping / c - 1.0) fermi = frange[np.argmin(relative_error)] step /= 10.0 if min(relative_error) > rtol: raise ValueError('Could not find fermi within {}% of c={}'.format( rtol * 100, c)) return fermi class CompleteDos(Dos): """ This wrapper class defines a total dos, and also provides a list of PDos. 
Mainly used by pymatgen.io.vasp.Vasprun to create a complete Dos from a vasprun.xml file. You are unlikely to try to generate this object manually. Args: structure: Structure associated with this particular DOS. total_dos: total Dos for structure pdoss: The pdoss are supplied as an {Site:{Orbital:{ Spin:Densities}}} .. attribute:: structure Structure associated with the CompleteDos. .. attribute:: pdos Dict of partial densities of the form {Site:{Orbital:{Spin:Densities}}} """ def __init__(self, structure, total_dos, pdoss): super(CompleteDos, self).__init__( total_dos.efermi, energies=total_dos.energies, densities={k: np.array(d) for k, d in total_dos.densities.items()}) self.pdos = pdoss self.structure = structure def get_site_orbital_dos(self, site, orbital): """ Get the Dos for a particular orbital of a particular site. Args: site: Site in Structure associated with CompleteDos. orbital: Orbital in the site. Returns: Dos containing densities for orbital of site. """ return Dos(self.efermi, self.energies, self.pdos[site][orbital]) def get_site_dos(self, site): """ Get the total Dos for a site (all orbitals). Args: site: Site in Structure associated with CompleteDos. Returns: Dos containing summed orbital densities for site. """ site_dos = functools.reduce(add_densities, self.pdos[site].values()) return Dos(self.efermi, self.energies, site_dos) def get_site_spd_dos(self, site): """ Get orbital projected Dos of a particular site Args: site: Site in Structure associated with CompleteDos. Returns: dict of {orbital: Dos}, e.g. {"s": Dos object, ...} """ spd_dos = dict() for orb, pdos in self.pdos[site].items(): orbital_type = _get_orb_type(orb) if orbital_type in spd_dos: spd_dos[orbital_type] = add_densities(spd_dos[orbital_type], pdos) else: spd_dos[orbital_type] = pdos return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()} def get_site_t2g_eg_resolved_dos(self, site): """ Get the t2g, eg projected DOS for a particular site. Args: site: Site in Structure associated with CompleteDos. Returns: A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS for the site. """ t2g_dos = [] eg_dos = [] for s, atom_dos in self.pdos.items(): if s == site: for orb, pdos in atom_dos.items(): if orb in (Orbital.dxy, Orbital.dxz, Orbital.dyz): t2g_dos.append(pdos) elif orb in (Orbital.dx2, Orbital.dz2): eg_dos.append(pdos) return {"t2g": Dos(self.efermi, self.energies, functools.reduce(add_densities, t2g_dos)), "e_g": Dos(self.efermi, self.energies, functools.reduce(add_densities, eg_dos))} def get_spd_dos(self): """ Get orbital projected Dos. Returns: dict of {orbital: Dos}, e.g. {"s": Dos object, ...} """ spd_dos = {} for atom_dos in self.pdos.values(): for orb, pdos in atom_dos.items(): orbital_type = _get_orb_type(orb) if orbital_type not in spd_dos: spd_dos[orbital_type] = pdos else: spd_dos[orbital_type] = \ add_densities(spd_dos[orbital_type], pdos) return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()} def get_element_dos(self): """ Get element projected Dos. 
Returns: dict of {Element: Dos} """ el_dos = {} for site, atom_dos in self.pdos.items(): el = site.specie for pdos in atom_dos.values(): if el not in el_dos: el_dos[el] = pdos else: el_dos[el] = add_densities(el_dos[el], pdos) return {el: Dos(self.efermi, self.energies, densities) for el, densities in el_dos.items()} def get_element_spd_dos(self, el): """ Get element and spd projected Dos Args: el: Element in Structure.composition associated with CompleteDos Returns: dict of {Element: {"S": densities, "P": densities, "D": densities}} """ el = get_el_sp(el) el_dos = {} for site, atom_dos in self.pdos.items(): if site.specie == el: for orb, pdos in atom_dos.items(): orbital_type = _get_orb_type(orb) if orbital_type not in el_dos: el_dos[orbital_type] = pdos else: el_dos[orbital_type] = \ add_densities(el_dos[orbital_type], pdos) return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in el_dos.items()} @property def spin_polarization(self): """ Calculates spin polarization at Fermi level. See Sanvito et al., doi: 10.1126/sciadv.1602241 for an example usage. :return (float): spin polarization in range [0, 1], will also return NaN if spin polarization ill-defined (e.g. for insulator) """ n_F = self.get_interpolated_value(self.efermi) n_F_up = n_F[Spin.up] n_F_down = n_F[Spin.down] if (n_F_up + n_F_down) == 0: # only well defined for metals or half-mteals return float('NaN') spin_polarization = (n_F_up - n_F_down) / (n_F_up + n_F_down) return abs(spin_polarization) @classmethod def from_dict(cls, d): """ Returns CompleteDos object from dict representation. """ tdos = Dos.from_dict(d) struct = Structure.from_dict(d["structure"]) pdoss = {} for i in range(len(d["pdos"])): at = struct[i] orb_dos = {} for orb_str, odos in d["pdos"][i].items(): orb = Orbital[orb_str] orb_dos[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()} pdoss[at] = orb_dos return CompleteDos(struct, tdos, pdoss) def as_dict(self): """ Json-serializable dict representation of CompleteDos. """ d = {"@module": self.__class__.__module__, "@class": self.__class__.__name__, "efermi": self.efermi, "structure": self.structure.as_dict(), "energies": list(self.energies), "densities": {str(spin): list(dens) for spin, dens in self.densities.items()}, "pdos": []} if len(self.pdos) > 0: for at in self.structure: dd = {} for orb, pdos in self.pdos[at].items(): dd[str(orb)] = {"densities": {str(int(spin)): list(dens) for spin, dens in pdos.items()}} d["pdos"].append(dd) d["atom_dos"] = {str(at): dos.as_dict() for at, dos in self.get_element_dos().items()} d["spd_dos"] = {str(orb): dos.as_dict() for orb, dos in self.get_spd_dos().items()} return d def __str__(self): return "Complete DOS for " + str(self.structure) class LobsterCompleteDos(CompleteDos): """ Extended CompleteDOS for Lobster """ def get_site_orbital_dos(self, site, orbital): """ Get the Dos for a particular orbital of a particular site. Args: site: Site in Structure associated with CompleteDos. orbital: principal quantum number and orbital in string format, e.g. "4s". possible orbitals are: "s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2", "d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz", "f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)" In contrast to the Cohpcar and the Cohplist objects, the strings from the Lobster files are used Returns: Dos containing densities of an orbital of a specific site. 
""" if orbital[1:] not in ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2", "d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz", "f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"]: raise ValueError('orbital is not correct') else: return Dos(self.efermi, self.energies, self.pdos[site][orbital]) def get_site_t2g_eg_resolved_dos(self, site): """ Get the t2g, eg projected DOS for a particular site. Args: site: Site in Structure associated with CompleteDos. Returns: A dict {"e_g": Dos, "t2g": Dos} containing summed e_g and t2g DOS for the site. """ warnings.warn("Are the orbitals correctly oriented? Are you sure?") t2g_dos = [] eg_dos = [] for s, atom_dos in self.pdos.items(): if s == site: for orb, pdos in atom_dos.items(): if _get_orb_lobster(orb) in (Orbital.dxy, Orbital.dxz, Orbital.dyz): t2g_dos.append(pdos) elif _get_orb_lobster(orb) in (Orbital.dx2, Orbital.dz2): eg_dos.append(pdos) return {"t2g": Dos(self.efermi, self.energies, functools.reduce(add_densities, t2g_dos)), "e_g": Dos(self.efermi, self.energies, functools.reduce(add_densities, eg_dos))} def get_spd_dos(self): """ Get orbital projected Dos. For example, if 3s and 4s are included in the basis of some element, they will be both summed in the orbital projected DOS Returns: dict of {orbital: Dos}, e.g. {"s": Dos object, ...} """ spd_dos = {} for atom_dos in self.pdos.values(): for orb, pdos in atom_dos.items(): orbital_type = _get_orb_type_lobster(orb) if orbital_type not in spd_dos: spd_dos[orbital_type] = pdos else: spd_dos[orbital_type] = \ add_densities(spd_dos[orbital_type], pdos) return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in spd_dos.items()} def get_element_spd_dos(self, el): """ Get element and spd projected Dos Args: el: Element in Structure.composition associated with LobsterCompleteDos Returns: dict of {Element: {"S": densities, "P": densities, "D": densities}} """ el = get_el_sp(el) el_dos = {} for site, atom_dos in self.pdos.items(): if site.specie == el: for orb, pdos in atom_dos.items(): orbital_type = _get_orb_type_lobster(orb) if orbital_type not in el_dos: el_dos[orbital_type] = pdos else: el_dos[orbital_type] = \ add_densities(el_dos[orbital_type], pdos) return {orb: Dos(self.efermi, self.energies, densities) for orb, densities in el_dos.items()} @classmethod def from_dict(cls, d): """ Returns: CompleteDos object from dict representation. """ tdos = Dos.from_dict(d) struct = Structure.from_dict(d["structure"]) pdoss = {} for i in range(len(d["pdos"])): at = struct[i] orb_dos = {} for orb_str, odos in d["pdos"][i].items(): orb = orb_str orb_dos[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()} pdoss[at] = orb_dos return LobsterCompleteDos(struct, tdos, pdoss) def add_densities(density1, density2): """ Method to sum two densities. Args: density1: First density. density2: Second density. Returns: Dict of {spin: density}. """ return {spin: np.array(density1[spin]) + np.array(density2[spin]) for spin in density1.keys()} def _get_orb_type(orb): try: return orb.orbital_type except AttributeError: return orb def f0(E, fermi, T): """ Returns the equilibrium fermi-dirac. Args: E (float): energy in eV fermi (float): the fermi level in eV T (float): the temperature in kelvin """ return 1. / (1. 
+ np.exp((E - fermi) / (_cd("Boltzmann constant in eV/K") * T))) def _get_orb_type_lobster(orb): """ Args: orb: string representation of orbital Returns: OrbitalType """ orb_labs = ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2", "d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz", "f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"] try: orbital = Orbital(orb_labs.index(orb[1:])) return orbital.orbital_type except AttributeError: print("Orb not in list") def _get_orb_lobster(orb): """ Args: orb: string representation of orbital Returns: Orbital """ orb_labs = ["s", "p_y", "p_z", "p_x", "d_xy", "d_yz", "d_z^2", "d_xz", "d_x^2-y^2", "f_y(3x^2-y^2)", "f_xyz", "f_yz^2", "f_z^3", "f_xz^2", "f_z(x^2-y^2)", "f_x(x^2-3y^2)"] try: orbital = Orbital(orb_labs.index(orb[1:])) return orbital except AttributeError: print("Orb not in list")
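# ----------------------------------------------------------------------
# Editor's note (added): a minimal usage sketch for the Dos API defined
# above.  The energy grid and densities are synthetic toy values chosen
# only to illustrate get_gap()/get_cbm_vbm(); they are not from the
# original module or any real calculation.  FermiDos is not shown because
# it additionally requires a pymatgen Structure.
if __name__ == "__main__":
    # Toy spin-polarized DOS with a ~2 eV gap centred on E_F = 0 eV.
    energies = np.linspace(-5.0, 5.0, 501)
    dens = np.where(np.abs(energies) > 1.0, 1.0, 0.0)
    dos = Dos(0.0, energies, {Spin.up: dens, Spin.down: dens})
    print(dos.get_gap())        # ~2.0 eV for this toy density
    print(dos.get_cbm_vbm())    # (cbm, vbm) band edges in eV
    # Gaussian smearing of the densities (uses scipy internally).
    smeared = dos.get_smeared_densities(sigma=0.05)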
montoyjh/pymatgen
pymatgen/electronic_structure/dos.py
Python
mit
37136
[ "DIRAC", "Gaussian", "VASP", "pymatgen" ]
6f36fb7bc1bef011da10812fa3106d5ca18a52d286badc2fd1ece8383c288c65
from __future__ import print_function from itertools import islice, product import logging import MDAnalysis as md import math import random import numpy as np import pandas as pd import plotly import plotly.graph_objs as go import subprocess import scipy import scipy.stats import string import time import settings class Atom(object): def __init__(self, identifier, **kwargs): self.id = identifier self.type = kwargs.get('type', None) self.element = kwargs.get('element', None) self.xyz = kwargs.get('xyz', None) self.stress = kwargs.get('stress', None) self.normal = kwargs.get('normal', False) self.distance = None self.sin_theta = None self.cos_theta = None self.sin_phi = None self.cos_phi = None self.spherical_stress = None self.voro_volume = 0 def calc_spherical_stress(self): """ Calculate spherical stress tensor from cartesian one ref: http://www.brown.edu/Departments/Engineering/Courses/En221/Notes/Polar_Coords/Polar_Coords.htm """ xx, yy, zz, xy, xz, yz = self.stress cart = np.array( [ [xx, xy, xz], [xy, yy, yz], [xz, yz, zz] ] ) # 1 for theta, the angle between xyz and z axis, 2 for phi, # angle between x axis and the projection on xy-plane sin1 = self.sin_theta cos1 = self.cos_theta sin2 = self.sin_phi cos2 = self.cos_phi conv = np.array( [ [sin1*cos2, cos1*cos2, -sin2], [sin1*sin2, cos1*sin2, -cos2], [cos1, -sin1, 0], ] ) sphe = np.dot( conv, cart.dot( np.transpose(conv) ) ) # Of format [ [rr, rTheta, rPhi], [rTheta, thetaTheta, thetaPhi], [rPhi, thetaPhi, phiPhi] ] self.spherical_stress = sphe class Box(object): PI = 3.1415926 def __init__(self, timestep=0, radius=None, use_atomic_volume=True, average_on_atom=False, **kwargs): # Current timestep. self.timestep = timestep # Maximum bubble radius in box. self.radius = radius self.count = 0 # XYZ boundaries. self.bx = kwargs.get('bx', None) self.by = kwargs.get('by', None) self.bz = kwargs.get('bz', None) # Bubble center coordinates. self._center = kwargs.get('center', None) # All atoms. self.atoms = [] # Container of atoms for each element. self._elements = {} # Container of shell stress for each element. self._shell_stress = {} self._shell_stress_r = {} self._shell_stress_theta = {} self._shell_stress_phi = { } # Container of shell atom count for each element. self.nbins = None self._shell_atoms = {} self._shell_atom_objs = [] self._shell_volumes = {} # Indicator of stats status. 
self._stats_finished = False self._measured = False # Dump atom coordinates to calculate voro tessellation volume self.voro_file_name = 'atom_coors' self.use_atomic_volume = use_atomic_volume self.average_on_atom = average_on_atom @property def measured(self): """Returns true if all atoms have a distance (to bubble center).""" if all([x.distance for x in self.atoms]): self._measured = True else: self._measured = False return self._measured @property def center(self): return self._center @center.setter def center(self, coor): self._center = coor self._measured = False self._stats_finished = False def combine_water_atoms(self): """ Combine H and O together into a new particle stress = S_h + S_o coor = center of mass The sequency of H and O atoms are O H H """ self._old_atoms = self.atoms self.atoms = [] self._old_atoms.sort( key=lambda x: x.id ) water = [] for atom in self._old_atoms: if atom.element not in ['H', 'O']: self.atoms.append( atom ) else: water.append(atom) if len( water ) == 3: # need to combine the 3 atoms into 1 now assert [ _ele.element for _ele in water ] == ['O', 'H', 'H'] new_stress = [a+b+c for a, b, c in zip(water[0].stress, water[1].stress, water[2].stress)] new_volume = sum( _ele.voro_volume for _ele in water ) masses = [ 16 if _ele.element == 'O' else 1 for _ele in water ] xs = [ _ele.xyz[0] for _ele in water] ys = [ _ele.xyz[ 1 ] for _ele in water ] zs = [ _ele.xyz[ 2 ] for _ele in water ] cx = sum( m*x for m,x in zip(masses, xs) ) / sum(masses) cy = sum( m * y for m, y in zip( masses, ys ) ) / sum( masses ) cz = sum( m * z for m, z in zip( masses, zs ) ) / sum( masses ) new_xyz = (cx, cy, cz) new_id = water[0].id normal = water[0].normal self.atoms.append( Atom(new_id, type=3, element='H', xyz=new_xyz, stress=new_stress, normal=normal) ) water = [] def dump_atoms_for_voro( self, length=None ): ''' Dump atom coordinates so we can calculate Voronoi tessellation using Voro++ from http://math.lbl.gov/voro++/ The input file format for voro++ is <atom id> <x> <y> <z> and output file format is <atom id> <x> <y> <z> <tessellation volume> ''' logging.info( 'Dump atom coordinates to {}'.format( self.voro_file_name ) ) fmt = '{} {} {} {}\n' if length: xmin, xmax = self.center[0] - length, self.center[0] + length ymin, ymax = self.center[1] - length, self.center[1] + length zmin, zmax = self.center[2] - length, self.center[2] + length with open( self.voro_file_name, 'w' ) as output: for atom in self.atoms: x, y, z = atom.xyz if length: if xmin <= x <= xmax and ymin<= y <= ymax and zmin <= z <= zmax: output.write( fmt.format( atom.id, x, y, z ) ) else: output.write( fmt.format( atom.id, x, y, z ) ) def voro_cmd( self, gnuplot=False, length=None ): ''' CMD to run voro++ in bash gnuplot=True will also export gnu plot file. Be careful when system is large as this file will be extremely large default to use -o to preserve the atom order. This has small memory and performance impact as the documentation says. 
''' # when have length -o will not work cmd = 'voro++' if length else 'voro++ -o' fmt = cmd + ' {opts} {{xmin}} {{xmax}} {{ymin}} {{ymax}} {{zmin}} {{zmax}} {{infile}}' opts = '-g' if gnuplot else '' fmt = fmt.format( opts=opts ) if length: xmin, xmax = self.center[0] - length, self.center[0] + length ymin, ymax = self.center[1] - length, self.center[1] + length zmin, zmax = self.center[2] - length, self.center[2] + length else: xmin, xmax = self.bx ymin, ymax = self.by zmin, zmax = self.bz return fmt.format( xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, zmin=zmin, zmax=zmax, infile=self.voro_file_name) def run_voro_cmd( self, gnuplot=False, length=None ): logging.info( 'Calculating voro volumes for atoms' ) cmd = self.voro_cmd( gnuplot=gnuplot, length=length ) logging.info( "Running: {}".format( cmd )) sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out, err = sp.communicate() if err: raise Exception(err) logging.info( "Finished: {}".format( cmd ) ) def read_voro_volumes( self ): voro_out = self.voro_file_name + '.vol' logging.info( 'Reading voro volumes from {}'.format( voro_out ) ) with open( voro_out, 'r' ) as volumes: idx = 0 for line in volumes: atom_id, x, y, z, vol = [ float(ele) for ele in line.split() ] atom_id = int( atom_id ) atom = self.atoms[ idx ] try: assert( atom.id == atom_id ) except Exception as e: print( atom.id, atom_id ) raise e atom.voro_volume = vol idx += 1 def calc_voro_volumes( self, gnuplot=False, length=None ): ''' Calculate voro tessellation volume using voro ''' self.dump_atoms_for_voro( length=length ) self.run_voro_cmd( gnuplot=gnuplot, length=length ) if not length: self.read_voro_volumes() def adjust_water_vol(self, ratio=(0.5, 0.25)): """ Adjust volume of H and O in water. For pure water system only """ satoms = sorted( self.atoms, key= lambda x: x.id) assert( len( satoms ) % 3 == 0 ) assert( ratio[0] + 2 * ratio[1] == 1.0) for idx in xrange( len(satoms) / 3): o = satoms[ idx * 3 ] h1 = satoms[ idx * 3 + 1 ] h2 = satoms[ idx * 3 + 2 ] vsum = sum( ele.voro_volume for ele in [o, h1, h2]) vo = ratio[0] * vsum vh = ratio[1] * vsum o.adj_vol = vo h1.adj_vol = vh h2.adj_vol = vh def set_boundary(self, bx, by, bz): """Set bx by bz together.""" self.bx = bx self.by = by self.bz = bz def add_atom(self, atom): self.atoms.append(atom) self.count += 1 # Need to run stats after new atom added. self._stats_finished = False if atom.element in self._elements: self._elements[atom.element].append(atom) else: self._elements[atom.element] = [atom] def measure(self): """Measure distance to bubble center for each atom.""" for atom in self.atoms: coor = np.array(atom.xyz) atom.distance = np.linalg.norm(coor - self.center) if atom.normal: # Calculate sin cos for theta and phi dx = coor[0] - self.center[0] dy = coor[1] - self.center[1] dz = coor[2] - self.center[2] xy_square = math.sqrt(dx*dx + dy*dy) atom.sin_theta = xy_square / atom.distance atom.cos_theta = dz / atom.distance atom.sin_phi = dy / xy_square atom.cos_phi = dx / xy_square self.calc_voro_volumes() def stats(self, dr, normal): """ System stats. Generate data for atom stats and stress stats for each element. self._shell_atoms = {} self._shell_stress = {} """ if not self.measured: raise AtomUnmeasuredError("Some atoms are unmeasuerd") self.nbins = int(math.ceil(self.radius / float(dr))) self._shell_atom_objs = [ { } for x in range( self.nbins ) ] for ele, atoms in self._elements.iteritems(): # Do stats for each element. 
for atom in atoms: if atom.distance < self.radius: shell_idx = int( atom.distance / dr ) self._shell_atom_objs[ shell_idx ].setdefault(ele, []).append( atom ) if normal: atom.calc_spherical_stress() self._stats_finished = True def atom_stats(self, element, dr): """Atom ratio stats inside bubble.""" if not self._stats_finished: self.stats(dr) nbins = len(self._shell_atoms[element]) bubble_atoms = {} # Init bubble atoms by copying shell atoms for ele, count in self._shell_atoms.iteritems(): bubble_atoms[ele] = [x for x in count] for i in range(1, nbins): bubble_atoms[ele][i] += bubble_atoms[ele][i - 1] bubble_atoms[ele] = np.array(bubble_atoms[ele]) return bubble_atoms[element] / sum(bubble_atoms.values()) def pressure_stats(self, elements, dr): """Average pressure stats inside bubble for species in elements.""" if not self._stats_finished: self.stats(dr) nbins = len(self._shell_stress[elements[0]]) # Calculate stress for all element in elements as whole. # Convert numpy.Array to mutable list. stress_in = [x for x in sum([self._shell_stress[ele] for ele in elements])] stress_out = [x for x in stress_in] for i in range(1, nbins): # Cumulative stress. stress_in[i] += stress_in[i-1] stress_out[nbins - 1 - i] += stress_out[nbins - i] for i in range(1, nbins): # Stress -> pressure. stress_in[i] = 0 - stress_in[i] / self.vol_sphere((i+1)*dr) / 3.0 stress_out[nbins-1-i] = 0 - stress_out[nbins-1-i] / (self.vol_sphere(self.radius) - self.vol_sphere((nbins-i-1)*dr)) / 3 # Head and tail. stress_in[0] = 0 - stress_in[0] / self.vol_sphere(dr) / 3 stress_out[nbins - 1] = 0 - stress_out[nbins - 1] / (self.vol_sphere(self.radius) - self.vol_sphere((nbins - 1)*dr)) / 3 return {'in': stress_in, 'out': stress_out} def shell_pressure_stats(self, elements, dr, normal=False): """Average pressure of elements inside shell.""" self.stats(dr, normal=normal) print( "NNNNNumber of bins: {}".format(self.nbins) ) if not normal: # atom.stress has 3 elements, xx yy zz components if self.use_atomic_volume: if self.average_on_atom: # atomic volume is used, pressure is calculated for each atom and then averaged together stress = [] for idx, shell_atoms in enumerate(self._shell_atom_objs): pressure_raw = {} for element, atoms in shell_atoms.iteritems(): if element in elements: # P = -(S_xx + S_yy + S_zz)/3/V pressure_raw[element] = [ - sum(atom.stress)/atom.voro_volume/3.0 for atom in atoms ] # Average pressure = sum(Pressure)/n_atoms n_atoms = sum( len(_ele) for _ele in pressure_raw.values() ) if n_atoms != 0: pressure_ave = sum( sum(_ele) for _ele in pressure_raw.values() ) / n_atoms else: pressure_ave = 0 stress.append(pressure_ave) return stress else: # pressure is calculated as sum(atom stress in a shell) / sum(atom volume in a shell) stress = [] for idx, shell_atoms in enumerate( self._shell_atom_objs ): stress_all = 0 volume_all = 0 for element, atoms in shell_atoms.iteritems(): if element in elements: stress_all += sum( sum(atom.stress[:3]) for atom in atoms ) volume_all += sum( atom.voro_volume for atom in atoms ) if volume_all != 0: pressure_ave = - stress_all / 3.0 / volume_all else: pressure_ave = 0 stress.append( pressure_ave ) return stress else: # use shell volume stress = [ ] for idx, shell_atoms in enumerate( self._shell_atom_objs ): r_min, r_max = idx * dr, (idx + 1)*dr stress_all = 0 volume_all = self.vol_sphere(r_max) - self.vol_sphere(r_min) for element, atoms in shell_atoms.iteritems(): if element in elements: stress_all += sum( sum( atom.stress[:3] ) for atom in atoms ) pressure_ave = - stress_all / 3.0 
/ volume_all stress.append( pressure_ave ) return stress else: # normal pressure, atom.spherical_stress has 6 items: xx, yy, zz, xy, xz, yz. stress_r = [] stress_theta = [] stress_phi = [] if self.use_atomic_volume: if self.average_on_atom: # Pressure is calculate as average of pressure on each atom for idx, shell_atoms in enumerate( self._shell_atom_objs ): pressure_r_raw = {} pressure_theta_raw = {} pressure_phi_raw = {} for element, atoms in shell_atoms.iteritems(): if element in elements: pressure_r_raw[element] = [ - atom.spherical_stress[0][0] / atom.voro_volume for atom in atoms ] pressure_theta_raw[element] = [ - atom.spherical_stress[1][1] / atom.voro_volume for atom in atoms ] pressure_phi_raw[element] = [ - atom.spherical_stress[2][2] / atom.voro_volume for atom in atoms ] n_atoms = sum( len( _ele ) for _ele in pressure_r_raw.values() ) if n_atoms != 0: pressure_r_ave = sum( sum(_ele) for _ele in pressure_r_raw.values() ) / n_atoms pressure_theta_ave = sum( sum(_ele) for _ele in pressure_theta_raw.values() ) / n_atoms pressure_phi_ave = sum( sum(_ele) for _ele in pressure_phi_raw.values() ) / n_atoms else: pressure_r_ave = pressure_theta_ave = pressure_phi_ave = 0 stress_r.append( pressure_r_ave ) stress_theta.append( pressure_theta_ave ) stress_phi.append( pressure_phi_ave ) return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, } else: # Pressure is calculated as sum(stress)/sum(atomic_volume) for idx, shell_atoms in enumerate( self._shell_atom_objs ): stress_r_all = 0 stress_theta_all = 0 stress_phi_all = 0 volume_all = 0 for element, atoms in shell_atoms.iteritems(): if element in elements: stress_r_all += sum( atom.spherical_stress[0][0] for atom in atoms ) stress_theta_all += sum( atom.spherical_stress[1][1] for atom in atoms ) stress_phi_all += sum( atom.spherical_stress[2][2] for atom in atoms ) volume_all += sum( atom.voro_volume for atom in atoms ) if volume_all != 0: pressure_r_ave = - stress_r_all / volume_all pressure_theta_ave = - stress_theta_all / volume_all pressure_phi_ave = - stress_phi_all / volume_all else: pressure_r_ave = pressure_theta_ave = pressure_phi_ave = 0 stress_r.append( pressure_r_ave ) stress_theta.append( pressure_theta_ave ) stress_phi.append( pressure_phi_ave ) return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, } else: # Use shell volume for idx, shell_atoms in enumerate( self._shell_atom_objs ): r_min, r_max = idx * dr, (idx+1) * dr stress_r_all = 0 stress_theta_all = 0 stress_phi_all = 0 volume_all = self.vol_sphere(r_max) - self.vol_sphere(r_min) for element, atoms in shell_atoms.iteritems(): if element in elements: stress_r_all += sum( atom.spherical_stress[ 0 ][ 0 ] for atom in atoms ) stress_theta_all += sum( atom.spherical_stress[ 1 ][ 1 ] for atom in atoms ) stress_phi_all += sum( atom.spherical_stress[ 2 ][ 2 ] for atom in atoms ) pressure_r_ave = - stress_r_all / volume_all pressure_theta_ave = - stress_theta_all / volume_all pressure_phi_ave = - stress_phi_all / volume_all stress_r.append( pressure_r_ave ) stress_theta.append( pressure_theta_ave ) stress_phi.append( pressure_phi_ave ) return { 'r': stress_r, 'theta': stress_theta, 'phi': stress_phi, } def pressure_between(self, rlow, rhigh): """Return the average pressure and number of atoms between rlow and rhigh.""" stress = 0 count = 0 for atom in self.atoms: if atom.distance > rlow and atom.distance <= rhigh: count += 1 stress += sum(atom.stress) volume = self.vol_sphere(rhigh) - self.vol_sphere(rlow) return stress / volume / 3, count def 
shell_density(self, elements, mole, dr): """Shell density for species inside elements. mole unit - g/cm^3 dr unit - angstrom """ # Usually density_dr is different from stats_dr. self.stats(dr) # Avogadro constant. Modified by coefficient used to # convert angstrom^3 to cm^3. NA = 6.022 / 10 nbins = len(self._shell_atoms[elements[0]]) # Calculate atom count for all species in elements as whole. # Convert numpy.Array to mutable list. count = [x for x in sum([self._shell_atoms[ele] for ele in elements])] # Calculate density. for i in range(nbins): r_low = i * dr r_high = r_low + dr # Volume unit is Angstrom^3. volume = self.vol_sphere(r_high) - self.vol_sphere(r_low) count[i] = count[i] / NA / volume return count def bubble_density(self, elements, mole, dr): pass def xyz_density(self, elements, mole, dx): """Density distribution along x, y, and z inside box.""" # Avogadro constant. Modified by coefficient used to # convert angstrom^3 to cm^3. NA = 6.022 / 10 nx = int(math.ceil((self.bx[1] - self.bx[0]) / dx)) ny = int(math.ceil((self.by[1] - self.by[0]) / dx)) nz = int(math.ceil((self.bz[1] - self.bz[0]) / dx)) dist = {} dist['x'] = [0 for x in range(nx)] dist['y'] = [0 for y in range(ny)] dist['z'] = [0 for z in range(nz)] for ele in elements: # Count atoms. for atom in self._elements[ele]: dist['x'][int(atom.xyz[0] / dx)] += 1 dist['y'][int(atom.xyz[1] / dx)] += 1 dist['z'][int(atom.xyz[2] / dx)] += 1 volx = (self.by[1] - self.by[0]) * (self.bz[1] - self.bz[0]) * dx voly = (self.bx[1] - self.bx[0]) * (self.bz[1] - self.bz[0]) * dx volz = (self.by[1] - self.by[0]) * (self.bx[1] - self.bx[0]) * dx for i in range(nx): # Calculate density. dist['x'][i] = dist['x'][i] / NA / volx dist['y'][i] = dist['y'][i] / NA / voly dist['z'][i] = dist['z'][i] / NA / volz return dist def vol_sphere(self, r): """Volume of sphere with radius r.""" return 4.0/3 * Box.PI * (r ** 3) def volume(self): """ Box volume """ return (self.bx[1] - self.bx[0]) * (self.by[1] - self.by[0]) * (self.bz[1] - self.bz[0]) class Trajectory( object ): '''Gas molecule trajectory class''' def __init__( self, pdbPath, xtcPath ): self.universe = md.Universe( pdbPath, xtcPath ) self.set_density_params() @property def n_frames( self ): return self.universe.trajectory.n_frames @property def frame( self ): return self.universe.trajectory.frame def set_density_params(self, low=0.4, high=0.5, length=60 ): ''' Generate grid with length of dnesity_grid_length at x,y,z directions. Grids whose density are between low * max_density and high * max_density will be used for radius calculation. d ''' self.density_low = low self.density_high = high self.density_grid_length = length def set_frame( self, frame ): self.universe.trajectory[ frame ] def radius( self, frame ): ''' Bubble radius at one frame. Method: 1. Load the snapshot at frame 2. Load x, y, z coordinates 3. Calculate density grid mesh at grid points 4. Filter the shell grids with density between low * max density and high * max density 5. 
Calculate the average radius ''' start = time.clock() self.set_frame( frame ) # Load x, y, z coordinates data = pd.DataFrame( list(self.universe.coord), columns=['x','y','z']) x = data[ 'x' ].values y = data[ 'y' ].values z = data[ 'z' ].values # Density grid xyz = scipy.vstack( [ x, y, z ] ) kde = scipy.stats.gaussian_kde( xyz ) xmin, ymin, zmin = x.min(), y.min(), z.min() xmax, ymax, zmax = x.max(), y.max(), z.max() NI = complex( imag=self.density_grid_length) xi, yi, zi = scipy.mgrid[ xmin:xmax:NI, ymin:ymax:NI, zmin:zmax:NI ] coords = scipy.vstack([item.ravel() for item in [xi, yi, zi]]) density = kde(coords).reshape(xi.shape) # Filter density grid density_max = density.max() density_low = self.density_low * density_max density_high = self.density_high * density_max xyzs = [] N = self.density_grid_length for idx, idy, idz in product( xrange(N), xrange(N), xrange(N) ): if density_low < density[ idx, idy, idz ] <= density_high: xyzs.append( [ xi[ idx, idy, idz ], yi[ idx, idy, idz ], zi[ idx, idy, idz ] ] ) xyzs = np.array( xyzs ) # Average radius center = xyzs.mean( axis=0 ) rs = [] for xyz_ele in xyzs: rs.append( np.linalg.norm( center - xyz_ele ) ) duration = time.clock() - start print( "Radius for frame {} calculated in {:.2f} seconds".format( frame, duration ) ) return center, scipy.mean( rs ) def radius_for_frames( self, start, end, step=1 ): ret = [] for frame in xrange( start, end, step ): center, radius = self.radius( frame ) ret.append( [ frame, radius ] ) return ret def all_radius( self ): return self.radius_for_frames( 0, self.n_frames, 1 ) def regression( self, radiusList ): ''' Input (frame, radius) lists and do linear regression on the data ''' ts = [ ele[0] for ele in radiusList ] rs = [ ele[1] for ele in radiusList ] slope, intercept, r_value, p_value, std_err = scipy.stats.linregress( ts, rs ) return slope, intercept, r_value, p_value, std_err def plot_radius( self, rs, notebook=False ): ''' plot dots and linear regression results ''' xs = [ ele[0] for ele in rs ] ys = [ ele[1] for ele in rs ] x_min = min( xs ) x_max = max( xs ) x_min = x_min - ( x_max - x_min ) * 0.05 x_max = x_max + ( x_max - x_min ) * 0.05 slope, intercept, r_value, p_value, std_err = self.regression( rs ) xs_line = [ x_min ] + xs + [ x_max ] ys_line = [ ele * slope + intercept for ele in xs_line ] # Scatter plot scatter = go.Scatter( x = [ele[0] for ele in rs], y = [ele[1] for ele in rs], mode = 'markers', name = 'Radius' ) reg_line = go.Scatter( x = xs_line, y = ys_line, mode='lines', name='y={:.4f}x+{:.4f}, p-value={:.2f}, StdErr={:.3f}'.format(slope, intercept, p_value, std_err) ) data = go.Data([scatter, reg_line]) plot = plotly.offline.iplot if notebook else plotly.offline.plot plot( { 'data': data, 'layout': go.Layout( title='Radius vs Frame', xaxis={'title':'Frame'}, yaxis={'title':'Radius'} ) } ) def flux_info( self, start, end, step=1 ): ''' Flux info for frames [start:end:step]. 
Info are, for each step, nframe, center, radius, n atoms inside sphere ''' info = [] for nframe in xrange( start, end, step ): center, radius = self.radius( nframe ) # Selector for AtomGroup in MDAnalysis selector = 'point ' + ' '.join( str( ele ) for ele in list( center ) + [ radius ] ) # Explicitly set frame here self.set_frame( nframe ) atoms = self.universe.select_atoms( selector ) natoms = atoms.n_atoms info.append( (nframe, center, radius, natoms) ) return info ################################################# ################# Exceptions #################### ################################################# class AtomUnmeasuredError(Exception): pass ################################################ ################## Functions ################### ################################################ def next_n_lines(file_opened, N, strip='right'): strip_dic = { 'right': string.rstrip, 'left': string.lstrip, 'both': string.strip } if strip: return [strip_dic[strip](x) for x in islice(file_opened, N)] else: return list(islice(file_opened, N)) def read_stress(stress_file, N=settings.NLINES, normalPressure=False): """ Read dump file into a list of atoms, which have type / coordinates / stresses info stored as Atom properties. Dump file data format: atom_id atom_type x y z stress_x stress_y stress_z """ atoms = {} count = 0 data = next_n_lines(stress_file, N)[9:] while data: atoms[count] = [] for line in data: line = line.strip().split() identifier = int(line[0]) atom_type = int(line[1]) element = settings.ELEMENTS[atom_type] xyz = tuple([float(x) for x in line[2:5]]) if normalPressure: # To calculate normal pressure, we need xx, yy, zz, xy, xz, yz stress = tuple([float(x) for x in line[5:11]]) else: # To calculate pressure, we need xx, yy, zz stress = tuple([float(x) for x in line[5:8]]) atom = Atom(identifier, type=atom_type, element=element, xyz=xyz, stress=stress, normal=normalPressure) atoms[count].append(atom) # Process next N lines. data = next_n_lines(stress_file, N)[9:] count += 1 return atoms def read_pdb(filename): """ Read pdb file as a list of atoms """ logging.info( "Reading {}".format(filename) ) atoms_lines = [] with open(filename, 'r') as pdbfile: for line in pdbfile: if line.startswith('CRYST'): cryst_line = line elif line.startswith('ATOM'): atoms_lines.append( line ) x, y, z = [float(ele) for ele in cryst_line.strip().split()[1:4] ] atoms = [] for line in atoms_lines: data = line.strip().split() idx = int(data[1]) element = data[2][:2] coor = [ float(ele) for ele in data[5:8] ] atoms.append( Atom(identifier=idx, element=element, xyz=coor) ) return atoms, (x,y,z) def combine_water(atoms, remove=True): """ Combine water atoms """ combined = [] ne = [ ele for ele in atoms if ele.element == 'Ne' ] wat = [ele for ele in atoms if ele.element != 'Ne' ] logging.info("Before:: {} Ne, {} Water atoms".format(len(ne), len(wat))) idx_wat = len(ne) + 1 comb_wat = [] for idx in range( len( wat ) / 3 ): coor1 = np.array( wat[ idx * 3 ].xyz ) coor2 = np.array( wat[ idx * 3 + 1 ].xyz ) coor3 = np.array( wat[ idx * 3 + 2 ].xyz ) coor = (coor1 + coor2 + coor3) / 3. 
comb_wat.append(Atom(identifier=idx_wat, element='W', xyz=coor)) idx_wat += 1 if remove: selected = random.sample(comb_wat, len(comb_wat)/4) else: selected = comb_wat n_ne = len(ne) for idx in xrange(len(selected)): selected[idx].id = idx + 1 + n_ne logging.info("After:: {} Ne, {} Water atoms".format(len(ne), len(selected))) return ne + selected def write_lammps_data(atoms, xyz, filename): """ LAMMPS data format: atom idx, molecule idx, atom type, x, y, z, """ atom_types = {'Ne':1, 'W':2} x, y, z = xyz header = "LAMMPS bubble\n\n" \ "{n_atoms} atoms\n\n" \ "{n_types} atom types\n" \ "0 bond types\n" \ "0 angle types\n\n" \ "0 {x} xlo xhi\n0 {y} ylo yhi\n0 {z} zlo zhi\n\n"\ "Atoms\n\n".format(n_atoms=len(atoms), n_types=2,x=x,y=y,z=z) print(header) fmt = "{idx} {mol} {atype} {charge} {x} {y} {z}\n" for idx, atom in enumerate(atoms): header += fmt.format(idx=atom.id, mol=atom.id, atype=atom_types[atom.element], charge=0, x=atom.xyz[0], y=atom.xyz[1], z=atom.xyz[2]) with open(filename, 'w') as output: output.write(header) def average_atom_stress(write=True, step=0, *args): """Calculates averaged stress from multiple stress files. write determines whether to write output or not. step determines which timestep to average.""" n_files = float(len(args)) stress_list = [] for ele in args: stress_list.append(read_stress(ele)[step]) # Sort atoms by id. stress_list[-1].sort(key=lambda x: x.id) n_atoms = len(stress_list[0]) atoms = [] # Average stress for each atom id. for i in range(n_atoms): sx = sum([x[i].stress[0] for x in stress_list]) / n_files sy = sum([x[i].stress[1] for x in stress_list]) / n_files sz = sum([x[i].stress[2] for x in stress_list]) / n_files atom = stress_list[0][i] atoms.append( Atom(atom.id, type=atom.type, element=atom.element, xyz=atom.xyz, stress=(sx, sy, sz)) ) # Write averaged stress to file. if write: out_name = '.'.join(args[0].name.split('.')[:-1]) + '_averaged.dat' with open(out_name, 'w') as output: # Write header lines to be compatitable with LAMMPS dump files. output.write('Header line\n' * 9) for atom in atoms: # Do not write element here to be compatitable with # LAMMPS dump files. output.write("{} {} {} {} {} {} {} {}\n".format( atom.id, atom.type, atom.xyz[0], atom.xyz[1], atom.xyz[2], atom.stress[0], atom.stress[1], atom.stress[2])) print("Average Stress saved to {}.".format(out_name)) return atoms def build_box(atoms, timestep, radius, center, use_atomic_volume, average_on_atom, bx, by, bz): """Build a box from a list of atoms.""" box = Box(timestep, radius=radius, center=center, use_atomic_volume=use_atomic_volume, average_on_atom=average_on_atom) for atom in atoms: box.add_atom(atom) box.set_boundary(bx=bx, by=by, bz=bz) box.measure() return box def write_density(density, dr, outname, header): """Write density (both shell and xyz density) stats to output file. One density list at a time. """ with open(outname, 'w') as output: output.write(header) for i, item in enumerate(density): low = i * dr high = low + dr output.write('{l:.3f}\t{h:.3f}\t{d:.13f}\n'.format(l=low, h=high, d=item)) def write_pressure(pressure, dr, outname, header, bubble=False): """Write pressure (both bubble and shell pressure) stats to output file. If bubble is True, r_low is always zero. """ logging.info( "Writing output to {}".format(outname) ) if bubble: # Bubble pressure has in pressure and out pressure. 
with open(outname, 'w') as output: output.write(header) nbins = len(pressure['in']) for i in range(nbins): low = 0 high = (i + 1) * dr if i < nbins - 1: output.write('{l:.3f}\t{h:.3f}\t{pin:.13f}\t{pout:.13f}\n'.format( l=low, h=high, pin=pressure['in'][i], pout=pressure['out'][i+1] )) else: output.write('{l:.3f}\t{h:.3f}\t{pin:.13f}\t{pout:.13f}\n'.format( l=low, h=high, pin=pressure['in'][i], pout=0 )) else: # Shell pressure. with open(outname, 'w') as output: output.write(header) for i, item in enumerate(pressure): low = i * dr high = low + dr output.write('{l:.3f}\t{h:.3f}\t{p:.13f}\n'.format(l=low, h=high, p=item)) def write_ratio(ratio, dr, outname, header, bubble=True): """Write atom ratio stats to output file. If bubble is True, r_low is always zero. """ with open(outname, 'w') as output: output.write(header) for i, item in enumerate(ratio): low = 0 if bubble else i * dr high = (i + 1) * dr output.write('{l:.3f}\t{h:.3f}\t{r:.13f}\n'.format(l=low, h=high, r=item)) def bubble_ratio(box, elements, out_fmt, header, dr, time, container, debug=False): """Calculate bubble ratio stats and write results to disk.""" for eles in elements: # Ratio stats for each element. e = ''.join(eles) print('Bubble ratio stats for {e}'.format(e=e)) # Calculate ratio. ratio = box.atom_stats(eles[0], dr) # Write to file. outname = out_fmt.format(time=time, ele=e) write_ratio(ratio, dr, outname, header, bubble=True) if debug: # For testing. with open(container, 'a') as cc: cc.write(outname + '\n') def shell_ratio(box, elements, out_fmt, header, dr, time, container, debug=False): """Calculate shell ratio stats and write results to disk.""" pass def bubble_pressure(box, elements, out_fmt, header, dr, time, container, debug=False): """Calculate bubble pressure and write results to disk.""" for eles in elements: # Bubble pressure stats for each group of specified elements. e = ''.join(eles) print("Bubble pressure stats for {e}\n".format(e=e)) # Calculate bubble pressure. bubble_pressure = box.pressure_stats(eles, dr) # Write bubble pressure. outname = out_fmt.format(time=time, ele=e) write_pressure(bubble_pressure, dr, outname, header, bubble=True) if debug: # For testing. with open(container, 'a') as cc: cc.write(outname + '\n') def shell_pressure(box, elements, out_fmt, header, dr, time, container, normal=False, debug=False): """Calculate shell pressure and write results to disk.""" for eles in elements: # Shell pressure stats for each group of specified elements. e = ''.join(eles) print('Shell pressure stats for {e}\n'.format(e=e)) # Shell pressure. if not normal: shell_pressure = box.shell_pressure_stats(eles, dr, normal=normal) # Write to disk. outname = out_fmt.format(time=time, ele=e) write_pressure(shell_pressure, dr, outname, header, bubble=False) if debug: # For testing. with open(container, 'a') as cc: cc.write(outname + '\n') else: shell_pressure = box.shell_pressure_stats(eles, dr, normal=normal) shell_r, shell_theta, shell_phi = shell_pressure['r'], shell_pressure['theta'], shell_pressure['phi'] # Write to disk. outname1 = out_fmt.format(time=time, ele=e) + '_r' outname2 = out_fmt.format(time=time, ele=e) + '_theta' outname3 = out_fmt.format( time=time, ele=e ) + '_phi' write_pressure(shell_r, dr, outname1, header, bubble=False) write_pressure(shell_theta, dr, outname2, header, bubble=False) write_pressure( shell_phi, dr, outname3, header, bubble=False ) if debug: # For testing. 
with open(container, 'a') as cc: cc.write( outname1 + '\n' ) cc.write( outname2 + '\n' ) cc.write( outname3 + '\n' ) def bubble_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False): """Calculate bubble density stats and write results to disk.""" for eles in elements: # Bubble density stats for each group of specified elements. e = ''.join(eles) print('Bubble density stats for {e}\n'.format(e=e)) # Bubble density. bubble_density = box.bubble_density(eles, mole, dr) # Write to disk. outname = out_fmt.format(time=time, ele=e) write_density(bubble_density, dr, outname, header) if debug: # For testing. with open(container, 'a') as cc: cc.write(outname + '\n') def shell_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False): """Calculate shell density stats and write results to disk.""" for eles in elements: # Shell density stats for each group of specified elements. e = ''.join(eles) print('Shell density stats for {e}\n'.format(e=e)) # Shell density. shell_density = box.shell_density(eles, mole, dr) # Write to disk. outname = out_fmt.format(time=time, ele=e) write_density(shell_density, dr, outname, header) if debug: # For testing. with open(container, 'a') as cc: cc.write(outname + '\n') def xyz_density(box, elements, mole, out_fmt, header, dr, time, container, debug=False): """Calculate xyz density stats and write results to disk.""" for eles in elements: # XYZ density stats for each group of specified elements. e = ''.join(eles) print('XYZ density stats for {e}\n'.format(e=e)) # XYZ density. xyz_density = box.xyz_density(eles, mole, dr) # Write to disk. xout = out_fmt.format(time=time, ele=e, xyz='x') yout = out_fmt.format(time=time, ele=e, xyz='y') zout = out_fmt.format(time=time, ele=e, xyz='z') write_density(xyz_density['x'], dr, xout, header) write_density(xyz_density['y'], dr, yout, header) write_density(xyz_density['z'], dr, zout, header) if debug: # For testing. with open(container, 'a') as cc: out = '\n'.join([xout, yout, zout, '']) cc.write(out) def get_radius(box, element, dr, n=1, ratio=0.5): """Get the radius of a bubble. Radius is determined to be r with closest value of n_element / n_atoms to ratio, i.e. within radius, n_element / n_atoms should be as close to ratio as possible. n specifies number of radiuses to return, i.e. n radiuses that have n_element / n_atoms values closest to ratio.""" bubble_ratio = box.atom_stats(element, dr) deltas = [abs(x - ratio) for x in bubble_ratio] # Use nanmin to ignore NaNs in ratio vector. # Do not select radiuses smaller than 10 angstrom. min_index = deltas.index(np.nanmin(deltas)) n = n / 2 ret = [] for i in range(-n, n + 1): index = min_index + i ret.append((dr * (index + 1), bubble_ratio[index])) return ret
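# ----------------------------------------------------------------------
# Editor's note (added): a hedged end-to-end sketch of the shell-pressure
# workflow implemented above.  The dump file name, box bounds, bubble
# centre/radius and element list are illustrative assumptions, not values
# from the original project, and calc_voro_volumes() (reached via
# build_box -> Box.measure) needs the external `voro++` binary on PATH.
if __name__ == "__main__":
    with open('dump.stress', 'r') as dump_file:    # hypothetical LAMMPS dump
        frames = read_stress(dump_file)            # {frame_index: [Atom, ...]}
    atoms = frames[0]                              # first stored frame
    box = build_box(atoms, timestep=0, radius=50.0,
                    center=(100.0, 100.0, 100.0),
                    use_atomic_volume=True, average_on_atom=False,
                    bx=(0.0, 200.0), by=(0.0, 200.0), bz=(0.0, 200.0))
    # Radial pressure profile in 2 Angstrom shells, Ne atoms only.
    pressures = box.shell_pressure_stats(['Ne'], dr=2.0)
    write_pressure(pressures, 2.0, 'shell_pressure.dat',
                   '# r_low\tr_high\tpressure\n', bubble=False)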
mikkkee/Bubble
bubble.py
Python
mit
44991
[ "Avogadro", "LAMMPS", "MDAnalysis" ]
bd7309c821ad9383fe6a56b82952b0fb87e626f256ad7cfdfac3100dc6535e97
#!/usr/bin/env python """ Convert from interval file to interval index file. usage: %prog <options> in_file out_file -c, --chr-col: chromosome column, default=1 -s, --start-col: start column, default=2 -e, --end-col: end column, default=3 """ from __future__ import division import optparse from galaxy import eggs eggs.require( "bx-python" ) from bx.interval_index_file import Indexes def main(): # Read options, args. parser = optparse.OptionParser() parser.add_option( '-c', '--chr-col', type='int', dest='chrom_col', default=1 ) parser.add_option( '-s', '--start-col', type='int', dest='start_col', default=2 ) parser.add_option( '-e', '--end-col', type='int', dest='end_col', default=3 ) (options, args) = parser.parse_args() input_fname, output_fname = args # Make column indices 0-based. options.chrom_col -= 1 options.start_col -= 1 options.end_col -= 1 # Do conversion. index = Indexes() offset = 0 for line in open(input_fname, "r"): feature = line.strip().split() if not feature or feature[0].startswith("track") or feature[0].startswith("#"): offset += len(line) continue chrom = feature[ options.chrom_col ] chrom_start = int( feature[ options.start_col ] ) chrom_end = int( feature[ options.end_col ] ) index.add( chrom, chrom_start, chrom_end, offset ) offset += len(line) index.write( open(output_fname, "w") ) if __name__ == "__main__": main()
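# ----------------------------------------------------------------------
# Editor's note (added): an illustrative invocation of this converter.
# The file names and BED-style content below are hypothetical examples,
# not part of the original tool.
#
#   $ cat intervals.bed
#   chr1    100    200    feature1
#   chr1    500    900    feature2
#   $ python interval_to_interval_index_converter.py -c 1 -s 2 -e 3 \
#         intervals.bed intervals.ii
#
# The .ii output is a bx-python interval index: for every data line it
# stores (chrom, start, end) together with the byte offset of that line in
# intervals.bed, so the original record can be seeked to later without
# rescanning the whole file.  Track and comment lines only advance the
# offset and are not indexed.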
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/datatypes/converters/interval_to_interval_index_converter.py
Python
gpl-3.0
1532
[ "Galaxy" ]
674e2f572fffd16df3b709a3ce7157ccb5f522f9c62ae3c4b714eb89336955e9
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # transformations.py # Copyright (c) 2006, Christoph Gohlke # Copyright (c) 2006-2010, The Regents of the University of California # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holders nor the names of any # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ Homogeneous Transformation Matrices and Quaternions --- :mod:`MDAnalysis.lib.transformations` ============================================================================================== A library for calculating 4x4 matrices for translating, rotating, reflecting, scaling, shearing, projecting, orthogonalizing, and superimposing arrays of 3D homogeneous coordinates as well as for converting between rotation matrices, Euler angles, and quaternions. Also includes an Arcball control object and functions to decompose transformation matrices. :Authors: `Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__, Laboratory for Fluorescence Dynamics, University of California, Irvine :Version: 2010.05.10 :Licence: BSD 3-clause Requirements ------------ * `Python 2.6 or 3.1 <http://www.python.org>`__ * `Numpy 1.4 <http://numpy.scipy.org>`__ * `transformations.c 2010.04.10 <http://www.lfd.uci.edu/~gohlke/>`__ (optional implementation of some functions in C) Notes ----- The API is not stable yet and is expected to change between revisions. This Python code is not optimized for speed. Refer to the transformations.c module for a faster implementation of some functions. Documentation in HTML format can be generated with epydoc. Matrices (M) can be inverted using ``numpy.linalg.inv(M)``, concatenated using ``numpy.dot(M0, M1)``, or used to transform homogeneous coordinates (v) using ``numpy.dot(M, v)`` for shape ``(4, *)`` "point of arrays", respectively ``numpy.dot(v, M.T)`` for shape ``(*, 4)`` "array of points". Use the transpose of transformation matrices for OpenGL ``glMultMatrixd()``. Calculations are carried out with ``numpy.float64`` precision. Vector, point, quaternion, and matrix function arguments are expected to be "array like", i.e. tuple, list, or numpy arrays. 
Return types are numpy arrays unless specified otherwise. Angles are in radians unless specified otherwise. Quaternions w+ix+jy+kz are represented as ``[w, x, y, z]``. A triple of Euler angles can be applied/interpreted in 24 ways, which can be specified using a 4 character string or encoded 4-tuple: - *Axes 4-string*: e.g. 'sxyz' or 'ryxy' - first character : rotations are applied to 's'tatic or 'r'otating frame - remaining characters : successive rotation axis 'x', 'y', or 'z' - *Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1) - inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix. - parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed by 'z', or 'z' is followed by 'x'. Otherwise odd (1). - repetition : first and last axis are same (1) or different (0). - frame : rotations are applied to static (0) or rotating (1) frame. References ---------- (1) Matrices and transformations. Ronald Goldman. In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990. (2) More matrices and transformations: shear and pseudo-perspective. Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. (3) Decomposing a matrix into simple transformations. Spencer Thomas. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991. (4) Recovering the data from the transformation matrix. Ronald Goldman. In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991. (5) Euler angle conversion. Ken Shoemake. In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994. (6) Arcball rotation control. Ken Shoemake. In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994. (7) Representing attitude: Euler angles, unit quaternions, and rotation vectors. James Diebel. 2006. (8) A discussion of the solution for the best rotation to relate two sets of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828. (9) Closed-form solution of absolute orientation using unit quaternions. BKP Horn. J Opt Soc Am A. 1987. 4(4):629-642. (10) Quaternions. Ken Shoemake. http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf (11) From quaternion to matrix and back. JMP van Waveren. 2005. http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm (12) Uniform random rotations. Ken Shoemake. In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992. (13) Quaternion in molecular modeling. CFF Karney. J Mol Graph Mod, 25(5):595-604 (14) New method for extracting the quaternion from a rotation matrix. Itzhack Y Bar-Itzhack, J Guid Contr Dynam. 2000. 23(6): 1085-1087. 
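As an illustration of the axis-sequence encodings above: the string and tuple
forms are interchangeable. A minimal doctest-style sketch (it relies only on
``euler_matrix`` and the ``_AXES2TUPLE`` table defined near the end of this
module):

>>> _AXES2TUPLE['sxyz']
(0, 0, 0, 0)
>>> numpy.allclose(euler_matrix(1, 2, 3, 'sxyz'),
...                euler_matrix(1, 2, 3, (0, 0, 0, 0)))
True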
Examples
--------

>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix((1, 2, 3))
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, (1, 2, 3))
True
>>> numpy.allclose(shear, (0, math.tan(beta), 0))
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True


Functions
---------

.. See `help(MDAnalysis.lib.transformations)` for a listing of functions or
.. the online help.


.. versionchanged:: 0.11.0
   Transformations library moved from MDAnalysis.core.transformations to
   MDAnalysis.lib.transformations

"""

import sys
import os
import warnings
import math

import numpy as np
from numpy.linalg import norm

from .mdamath import angle as vecangle


def identity_matrix():
    """Return 4x4 identity/unit matrix.

    >>> I = identity_matrix()
    >>> np.allclose(I, np.dot(I, I))
    True
    >>> np.sum(I), np.trace(I)
    (4.0, 4.0)
    >>> np.allclose(I, np.identity(4, dtype=np.float64))
    True
    """
    return np.identity(4, dtype=np.float64)


def translation_matrix(direction):
    """Return matrix to translate by direction vector.

    >>> v = np.random.random(3) - 0.5
    >>> np.allclose(v, translation_matrix(v)[:3, 3])
    True
    """
    M = np.identity(4)
    M[:3, 3] = direction[:3]
    return M


def translation_from_matrix(matrix):
    """Return translation vector from translation matrix.

    >>> v0 = np.random.random(3) - 0.5
    >>> v1 = translation_from_matrix(translation_matrix(v0))
    >>> np.allclose(v0, v1)
    True
    """
    return np.array(matrix, copy=False)[:3, 3].copy()


def reflection_matrix(point, normal):
    """Return matrix to mirror at plane defined by point and normal vector.

    >>> v0 = np.random.random(4) - 0.5
    >>> v0[3] = 1.0
    >>> v1 = np.random.random(3) - 0.5
    >>> R = reflection_matrix(v0, v1)
    >>> np.allclose(2., np.trace(R))
    True
    >>> np.allclose(v0, np.dot(R, v0))
    True
    >>> v2 = v0.copy()
    >>> v2[:3] += v1
    >>> v3 = v0.copy()
    >>> v3[:3] -= v1
    >>> np.allclose(v2, np.dot(R, v3))
    True
    """
    normal = unit_vector(normal[:3])
    M = np.identity(4)
    M[:3, :3] -= 2.0 * np.outer(normal, normal)
    M[:3, 3] = (2.0 * np.dot(point[:3], normal)) * normal
    return M


def reflection_from_matrix(matrix):
    """Return mirror plane point and normal vector from reflection matrix.
>>> v0 = np.random.random(3) - 0.5 >>> v1 = np.random.random(3) - 0.5 >>> M0 = reflection_matrix(v0, v1) >>> point, normal = reflection_from_matrix(M0) >>> M1 = reflection_matrix(point, normal) >>> is_same_transform(M0, M1) True """ M = np.array(matrix, dtype=np.float64, copy=False) # normal: unit eigenvector corresponding to eigenvalue -1 l, V = np.linalg.eig(M[:3, :3]) i = np.where(abs(np.real(l) + 1.0) < 1e-8)[0] if not len(i): raise ValueError("no unit eigenvector corresponding to eigenvalue -1") normal = np.real(V[:, i[0]]).squeeze() # point: any unit eigenvector corresponding to eigenvalue 1 l, V = np.linalg.eig(M) i = np.where(abs(np.real(l) - 1.0) < 1e-8)[0] if not len(i): raise ValueError("no unit eigenvector corresponding to eigenvalue 1") point = np.real(V[:, i[-1]]).squeeze() point /= point[3] return point, normal def rotation_matrix(angle, direction, point=None): """Return matrix to rotate about axis defined by point and direction. >>> R = rotation_matrix(math.pi/2.0, [0, 0, 1], [1, 0, 0]) >>> np.allclose(np.dot(R, [0, 0, 0, 1]), [ 1., -1., 0., 1.]) True >>> angle = (random.random() - 0.5) * (2*math.pi) >>> direc = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> R0 = rotation_matrix(angle, direc, point) >>> R1 = rotation_matrix(angle-2*math.pi, direc, point) >>> is_same_transform(R0, R1) True >>> R0 = rotation_matrix(angle, direc, point) >>> R1 = rotation_matrix(-angle, -direc, point) >>> is_same_transform(R0, R1) True >>> I = np.identity(4, np.float64) >>> np.allclose(I, rotation_matrix(math.pi*2, direc)) True >>> np.allclose(2., np.trace(rotation_matrix(math.pi/2, ... direc, point))) True """ sina = math.sin(angle) cosa = math.cos(angle) direction = unit_vector(direction[:3]) # rotation matrix around unit vector R = np.array( ( (cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa)), dtype=np.float64) R += np.outer(direction, direction) * (1.0 - cosa) direction *= sina R += np.array( ((0.0, -direction[2], direction[1]), (direction[2], 0.0, -direction[0]), (-direction[1], direction[0], 0.0)), dtype=np.float64) M = np.identity(4) M[:3, :3] = R if point is not None: # rotation not around origin point = np.array(point[:3], dtype=np.float64, copy=False) M[:3, 3] = point - np.dot(R, point) return M def rotation_from_matrix(matrix): """Return rotation angle and axis from rotation matrix. 
>>> angle = (random.random() - 0.5) * (2*math.pi) >>> direc = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> R0 = rotation_matrix(angle, direc, point) >>> angle, direc, point = rotation_from_matrix(R0) >>> R1 = rotation_matrix(angle, direc, point) >>> is_same_transform(R0, R1) True """ R = np.array(matrix, dtype=np.float64, copy=False) R33 = R[:3, :3] # direction: unit eigenvector of R33 corresponding to eigenvalue of 1 l, W = np.linalg.eig(R33.T) i = np.where(abs(np.real(l) - 1.0) < 1e-8)[0] if not len(i): raise ValueError("no unit eigenvector corresponding to eigenvalue 1") direction = np.real(W[:, i[-1]]).squeeze() # point: unit eigenvector of R33 corresponding to eigenvalue of 1 l, Q = np.linalg.eig(R) i = np.where(abs(np.real(l) - 1.0) < 1e-8)[0] if not len(i): raise ValueError("no unit eigenvector corresponding to eigenvalue 1") point = np.real(Q[:, i[-1]]).squeeze() point /= point[3] # rotation angle depending on direction cosa = (np.trace(R33) - 1.0) / 2.0 if abs(direction[2]) > 1e-8: sina = (R[1, 0] + (cosa - 1.0) * direction[0] * direction[1]) / direction[2] elif abs(direction[1]) > 1e-8: sina = (R[0, 2] + (cosa - 1.0) * direction[0] * direction[2]) / direction[1] else: sina = (R[2, 1] + (cosa - 1.0) * direction[1] * direction[2]) / direction[0] angle = math.atan2(sina, cosa) return angle, direction, point def scale_matrix(factor, origin=None, direction=None): """Return matrix to scale by factor around origin in direction. Use factor -1 for point symmetry. >>> v = (np.random.rand(4, 5) - 0.5) * 20.0 >>> v[3] = 1.0 >>> S = scale_matrix(-1.234) >>> np.allclose(np.dot(S, v)[:3], -1.234*v[:3]) True >>> factor = random.random() * 10 - 5 >>> origin = np.random.random(3) - 0.5 >>> direct = np.random.random(3) - 0.5 >>> S = scale_matrix(factor, origin) >>> S = scale_matrix(factor, origin, direct) """ if direction is None: # uniform scaling M = np.array( ((factor, 0.0, 0.0, 0.0), (0.0, factor, 0.0, 0.0), (0.0, 0.0, factor, 0.0), (0.0, 0.0, 0.0, 1.0)), dtype=np.float64) if origin is not None: M[:3, 3] = origin[:3] M[:3, 3] *= 1.0 - factor else: # nonuniform scaling direction = unit_vector(direction[:3]) factor = 1.0 - factor M = np.identity(4) M[:3, :3] -= factor * np.outer(direction, direction) if origin is not None: M[:3, 3] = (factor * np.dot(origin[:3], direction)) * direction return M def scale_from_matrix(matrix): """Return scaling factor, origin and direction from scaling matrix. 
>>> factor = random.random() * 10 - 5 >>> origin = np.random.random(3) - 0.5 >>> direct = np.random.random(3) - 0.5 >>> S0 = scale_matrix(factor, origin) >>> factor, origin, direction = scale_from_matrix(S0) >>> S1 = scale_matrix(factor, origin, direction) >>> is_same_transform(S0, S1) True >>> S0 = scale_matrix(factor, origin, direct) >>> factor, origin, direction = scale_from_matrix(S0) >>> S1 = scale_matrix(factor, origin, direction) >>> is_same_transform(S0, S1) True """ M = np.array(matrix, dtype=np.float64, copy=False) M33 = M[:3, :3] factor = np.trace(M33) - 2.0 try: # direction: unit eigenvector corresponding to eigenvalue factor l, V = np.linalg.eig(M33) i = np.where(abs(np.real(l) - factor) < 1e-8)[0][0] direction = np.real(V[:, i]).squeeze() direction /= vector_norm(direction) except IndexError: # uniform scaling factor = (factor + 2.0) / 3.0 direction = None # origin: any eigenvector corresponding to eigenvalue 1 l, V = np.linalg.eig(M) i = np.where(abs(np.real(l) - 1.0) < 1e-8)[0] if not len(i): raise ValueError("no eigenvector corresponding to eigenvalue 1") origin = np.real(V[:, i[-1]]).squeeze() origin /= origin[3] return factor, origin, direction def projection_matrix(point, normal, direction=None, perspective=None, pseudo=False): """Return matrix to project onto plane defined by point and normal. Using either perspective point, projection direction, or none of both. If pseudo is True, perspective projections will preserve relative depth such that Perspective = dot(Orthogonal, PseudoPerspective). >>> P = projection_matrix((0, 0, 0), (1, 0, 0)) >>> np.allclose(P[1:, 1:], np.identity(4)[1:, 1:]) True >>> point = np.random.random(3) - 0.5 >>> normal = np.random.random(3) - 0.5 >>> direct = np.random.random(3) - 0.5 >>> persp = np.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> P1 = projection_matrix(point, normal, direction=direct) >>> P2 = projection_matrix(point, normal, perspective=persp) >>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> is_same_transform(P2, np.dot(P0, P3)) True >>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0)) >>> v0 = (np.random.rand(4, 5) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = np.dot(P, v0) >>> np.allclose(v1[1], v0[1]) True >>> np.allclose(v1[0], 3.0-v1[1]) True """ M = np.identity(4) point = np.array(point[:3], dtype=np.float64, copy=False) normal = unit_vector(normal[:3]) if perspective is not None: # perspective projection perspective = np.array(perspective[:3], dtype=np.float64, copy=False) M[0, 0] = M[1, 1] = M[2, 2] = np.dot(perspective - point, normal) M[:3, :3] -= np.outer(perspective, normal) if pseudo: # preserve relative depth M[:3, :3] -= np.outer(normal, normal) M[:3, 3] = np.dot(point, normal) * (perspective + normal) else: M[:3, 3] = np.dot(point, normal) * perspective M[3, :3] = -normal M[3, 3] = np.dot(perspective, normal) elif direction is not None: # parallel projection direction = np.array(direction[:3], dtype=np.float64, copy=False) scale = np.dot(direction, normal) M[:3, :3] -= np.outer(direction, normal) / scale M[:3, 3] = direction * (np.dot(point, normal) / scale) else: # orthogonal projection M[:3, :3] -= np.outer(normal, normal) M[:3, 3] = np.dot(point, normal) * normal return M def projection_from_matrix(matrix, pseudo=False): """Return projection plane and perspective point from projection matrix. Return values are same as arguments for projection_matrix function: point, normal, direction, perspective, and pseudo. 
>>> point = np.random.random(3) - 0.5 >>> normal = np.random.random(3) - 0.5 >>> direct = np.random.random(3) - 0.5 >>> persp = np.random.random(3) - 0.5 >>> P0 = projection_matrix(point, normal) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, direct) >>> result = projection_from_matrix(P0) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False) >>> result = projection_from_matrix(P0, pseudo=False) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True >>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True) >>> result = projection_from_matrix(P0, pseudo=True) >>> P1 = projection_matrix(*result) >>> is_same_transform(P0, P1) True """ M = np.array(matrix, dtype=np.float64, copy=False) M33 = M[:3, :3] l, V = np.linalg.eig(M) i = np.where(abs(np.real(l) - 1.0) < 1e-8)[0] if not pseudo and len(i): # point: any eigenvector corresponding to eigenvalue 1 point = np.real(V[:, i[-1]]).squeeze() point /= point[3] # direction: unit eigenvector corresponding to eigenvalue 0 l, V = np.linalg.eig(M33) i = np.where(abs(np.real(l)) < 1e-8)[0] if not len(i): raise ValueError("no eigenvector corresponding to eigenvalue 0") direction = np.real(V[:, i[0]]).squeeze() direction /= vector_norm(direction) # normal: unit eigenvector of M33.T corresponding to eigenvalue 0 l, V = np.linalg.eig(M33.T) i = np.where(abs(np.real(l)) < 1e-8)[0] if len(i): # parallel projection normal = np.real(V[:, i[0]]).squeeze() normal /= vector_norm(normal) return point, normal, direction, None, False else: # orthogonal projection, where normal equals direction vector return point, direction, None, None, False else: # perspective projection i = np.where(abs(np.real(l)) > 1e-8)[0] if not len(i): raise ValueError( "no eigenvector not corresponding to eigenvalue 0") point = np.real(V[:, i[-1]]).squeeze() point /= point[3] normal = - M[3, :3] perspective = M[:3, 3] / np.dot(point[:3], normal) if pseudo: perspective -= normal return point, normal, None, perspective, pseudo def clip_matrix(left, right, bottom, top, near, far, perspective=False): """Return matrix to obtain normalized device coordinates from frustrum. The frustrum bounds are axis-aligned along x (left, right), y (bottom, top) and z (near, far). Normalized device coordinates are in range [-1, 1] if coordinates are inside the frustrum. If perspective is True the frustrum is a truncated pyramid with the perspective point at origin and direction along z axis, otherwise an orthographic canonical view volume (a box). Homogeneous coordinates transformed by the perspective clip matrix need to be dehomogenized (devided by w coordinate). 
>>> frustrum = np.random.rand(6) >>> frustrum[1] += frustrum[0] >>> frustrum[3] += frustrum[2] >>> frustrum[5] += frustrum[4] >>> M = clip_matrix(perspective=False, *frustrum) >>> np.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) array([-1., -1., -1., 1.]) >>> np.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]) array([ 1., 1., 1., 1.]) >>> M = clip_matrix(perspective=True, *frustrum) >>> v = np.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]) >>> v / v[3] array([-1., -1., -1., 1.]) >>> v = np.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0]) >>> v / v[3] array([ 1., 1., -1., 1.]) """ if left >= right or bottom >= top or near >= far: raise ValueError("invalid frustrum") if perspective: if near <= _EPS: raise ValueError("invalid frustrum: near <= 0") t = 2.0 * near M = ( (-t / (right - left), 0.0, (right + left) / (right - left), 0.0), (0.0, -t / (top - bottom), (top + bottom) / (top - bottom), 0.0), (0.0, 0.0, -(far + near) / (far - near), t * far / (far - near)), (0.0, 0.0, -1.0, 0.0)) else: M = ( (2.0 / (right - left), 0.0, 0.0, (right + left) / (left - right)), (0.0, 2.0 / (top - bottom), 0.0, (top + bottom) / (bottom - top)), (0.0, 0.0, 2.0 / (far - near), (far + near) / (near - far)), (0.0, 0.0, 0.0, 1.0)) return np.array(M, dtype=np.float64) def shear_matrix(angle, direction, point, normal): """Return matrix to shear by angle along direction vector on shear plane. The shear plane is defined by a point and normal vector. The direction vector must be orthogonal to the plane's normal vector. A point P is transformed by the shear matrix into P" such that the vector P-P" is parallel to the direction vector and its extent is given by the angle of P-P'-P", where P' is the orthogonal projection of P onto the shear plane. >>> angle = (random.random() - 0.5) * 4*math.pi >>> direct = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> normal = np.cross(direct, np.random.random(3)) >>> S = shear_matrix(angle, direct, point, normal) >>> np.allclose(1.0, np.linalg.det(S)) True """ normal = unit_vector(normal[:3]) direction = unit_vector(direction[:3]) if abs(np.dot(normal, direction)) > 1e-6: raise ValueError("direction and normal vectors are not orthogonal") angle = math.tan(angle) M = np.identity(4) M[:3, :3] += angle * np.outer(direction, normal) M[:3, 3] = -angle * np.dot(point[:3], normal) * direction return M def shear_from_matrix(matrix): """Return shear angle, direction and plane from shear matrix. 
>>> angle = (random.random() - 0.5) * 4*math.pi >>> direct = np.random.random(3) - 0.5 >>> point = np.random.random(3) - 0.5 >>> normal = np.cross(direct, np.random.random(3)) >>> S0 = shear_matrix(angle, direct, point, normal) >>> angle, direct, point, normal = shear_from_matrix(S0) >>> S1 = shear_matrix(angle, direct, point, normal) >>> is_same_transform(S0, S1) True """ M = np.array(matrix, dtype=np.float64, copy=False) M33 = M[:3, :3] # normal: cross independent eigenvectors corresponding to the eigenvalue 1 l, V = np.linalg.eig(M33) i = np.where(abs(np.real(l) - 1.0) < 1e-4)[0] if len(i) < 2: raise ValueError("no two linear independent eigenvectors found {0!s}".format(l)) V = np.real(V[:, i]).squeeze().T lenorm = -1.0 for i0, i1 in ((0, 1), (0, 2), (1, 2)): n = np.cross(V[i0], V[i1]) l = vector_norm(n) if l > lenorm: lenorm = l normal = n normal /= lenorm # direction and angle direction = np.dot(M33 - np.identity(3), normal) angle = vector_norm(direction) direction /= angle angle = math.atan(angle) # point: eigenvector corresponding to eigenvalue 1 l, V = np.linalg.eig(M) i = np.where(abs(np.real(l) - 1.0) < 1e-8)[0] if not len(i): raise ValueError("no eigenvector corresponding to eigenvalue 1") point = np.real(V[:, i[-1]]).squeeze() point /= point[3] return angle, direction, point, normal def decompose_matrix(matrix): """Return sequence of transformations from transformation matrix. matrix : array_like Non-degenerative homogeneous transformation matrix Return tuple of: scale : vector of 3 scaling factors shear : list of shear factors for x-y, x-z, y-z axes angles : list of Euler angles about static x, y, z axes translate : translation vector along x, y, z axes perspective : perspective partition of matrix Raise ValueError if matrix is of wrong type or degenerative. 
>>> T0 = translation_matrix((1, 2, 3)) >>> scale, shear, angles, trans, persp = decompose_matrix(T0) >>> T1 = translation_matrix(trans) >>> np.allclose(T0, T1) True >>> S = scale_matrix(0.123) >>> scale, shear, angles, trans, persp = decompose_matrix(S) >>> scale[0] 0.123 >>> R0 = euler_matrix(1, 2, 3) >>> scale, shear, angles, trans, persp = decompose_matrix(R0) >>> R1 = euler_matrix(*angles) >>> np.allclose(R0, R1) True """ # pylint: disable=unsubscriptable-object M = np.array(matrix, dtype=np.float64, copy=True).T if abs(M[3, 3]) < _EPS: raise ValueError("M[3, 3] is zero") M /= M[3, 3] P = M.copy() P[:, 3] = 0, 0, 0, 1 if not np.linalg.det(P): raise ValueError("matrix is singular") scale = np.zeros((3, ), dtype=np.float64) shear = [0, 0, 0] angles = [0, 0, 0] if any(abs(M[:3, 3]) > _EPS): perspective = np.dot(M[:, 3], np.linalg.inv(P.T)) M[:, 3] = 0, 0, 0, 1 else: perspective = np.array((0, 0, 0, 1), dtype=np.float64) translate = M[3, :3].copy() M[3, :3] = 0 row = M[:3, :3].copy() scale[0] = vector_norm(row[0]) row[0] /= scale[0] shear[0] = np.dot(row[0], row[1]) row[1] -= row[0] * shear[0] scale[1] = vector_norm(row[1]) row[1] /= scale[1] shear[0] /= scale[1] shear[1] = np.dot(row[0], row[2]) row[2] -= row[0] * shear[1] shear[2] = np.dot(row[1], row[2]) row[2] -= row[1] * shear[2] scale[2] = vector_norm(row[2]) row[2] /= scale[2] shear[1:] /= scale[2] if np.dot(row[0], np.cross(row[1], row[2])) < 0: scale *= -1 row *= -1 angles[1] = math.asin(-row[0, 2]) if math.cos(angles[1]): angles[0] = math.atan2(row[1, 2], row[2, 2]) angles[2] = math.atan2(row[0, 1], row[0, 0]) else: #angles[0] = math.atan2(row[1, 0], row[1, 1]) angles[0] = math.atan2(-row[2, 1], row[1, 1]) angles[2] = 0.0 return scale, shear, angles, translate, perspective def compose_matrix(scale=None, shear=None, angles=None, translate=None, perspective=None): """Return transformation matrix from sequence of transformations. This is the inverse of the decompose_matrix function. Sequence of transformations: scale : vector of 3 scaling factors shear : list of shear factors for x-y, x-z, y-z axes angles : list of Euler angles about static x, y, z axes translate : translation vector along x, y, z axes perspective : perspective partition of matrix >>> scale = np.random.random(3) - 0.5 >>> shear = np.random.random(3) - 0.5 >>> angles = (np.random.random(3) - 0.5) * (2*math.pi) >>> trans = np.random.random(3) - 0.5 >>> persp = np.random.random(4) - 0.5 >>> M0 = compose_matrix(scale, shear, angles, trans, persp) >>> result = decompose_matrix(M0) >>> M1 = compose_matrix(*result) >>> is_same_transform(M0, M1) True """ M = np.identity(4) if perspective is not None: P = np.identity(4) P[3, :] = perspective[:4] M = np.dot(M, P) if translate is not None: T = np.identity(4) T[:3, 3] = translate[:3] M = np.dot(M, T) if angles is not None: R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz') M = np.dot(M, R) if shear is not None: Z = np.identity(4) Z[1, 2] = shear[2] Z[0, 2] = shear[1] Z[0, 1] = shear[0] M = np.dot(M, Z) if scale is not None: S = np.identity(4) S[0, 0] = scale[0] S[1, 1] = scale[1] S[2, 2] = scale[2] M = np.dot(M, S) M /= M[3, 3] return M def orthogonalization_matrix(lengths, angles): """Return orthogonalization matrix for crystallographic cell coordinates. Angles are expected in degrees. The de-orthogonalization matrix is the inverse. 
>>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.)) >>> np.allclose(O[:3, :3], np.identity(3, float) * 10) True >>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7]) >>> np.allclose(np.sum(O), 43.063229) True """ a, b, c = lengths angles = np.radians(angles) sina, sinb, _ = np.sin(angles) cosa, cosb, cosg = np.cos(angles) co = (cosa * cosb - cosg) / (sina * sinb) return np.array( ( (a * sinb * math.sqrt(1.0 - co * co), 0.0, 0.0, 0.0), (-a * sinb * co, b * sina, 0.0, 0.0), (a * cosb, b * cosa, c, 0.0), (0.0, 0.0, 0.0, 1.0)), dtype=np.float64) def superimposition_matrix(v0, v1, scaling=False, usesvd=True): """Return matrix to transform given vector set into second vector set. `v0` and `v1` are shape `(3, *)` or `(4, *)` arrays of at least 3 vectors. If `usesvd` is ``True``, the weighted sum of squared deviations (RMSD) is minimized according to the algorithm by W. Kabsch [8]. Otherwise the quaternion based algorithm by B. Horn [9] is used (slower when using this Python implementation). The returned matrix performs rotation, translation and uniform scaling (if specified). >>> v0 = np.random.rand(3, 10) >>> M = superimposition_matrix(v0, v0) >>> np.allclose(M, np.identity(4)) True >>> R = random_rotation_matrix(np.random.random(3)) >>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1)) >>> v1 = np.dot(R, v0) >>> M = superimposition_matrix(v0, v1) >>> np.allclose(v1, np.dot(M, v0)) True >>> v0 = (np.random.rand(4, 100) - 0.5) * 20.0 >>> v0[3] = 1.0 >>> v1 = np.dot(R, v0) >>> M = superimposition_matrix(v0, v1) >>> np.allclose(v1, np.dot(M, v0)) True >>> S = scale_matrix(random.random()) >>> T = translation_matrix(np.random.random(3)-0.5) >>> M = concatenate_matrices(T, R, S) >>> v1 = np.dot(M, v0) >>> v0[:3] += np.random.normal(0.0, 1e-9, 300).reshape(3, -1) >>> M = superimposition_matrix(v0, v1, scaling=True) >>> np.allclose(v1, np.dot(M, v0)) True >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) >>> np.allclose(v1, np.dot(M, v0)) True >>> v = np.empty((4, 100, 3), dtype=np.float64) >>> v[:, :, 0] = v0 >>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False) >>> np.allclose(v1, np.dot(M, v[:, :, 0])) True """ v0 = np.array(v0, dtype=np.float64, copy=False)[:3] v1 = np.array(v1, dtype=np.float64, copy=False)[:3] if v0.shape != v1.shape or v0.shape[1] < 3: raise ValueError("vector sets are of wrong shape or type") # move centroids to origin t0 = np.mean(v0, axis=1) t1 = np.mean(v1, axis=1) v0 = v0 - t0.reshape(3, 1) v1 = v1 - t1.reshape(3, 1) if usesvd: # Singular Value Decomposition of covariance matrix u, s, vh = np.linalg.svd(np.dot(v1, v0.T)) # rotation matrix from SVD orthonormal bases R = np.dot(u, vh) if np.linalg.det(R) < 0.0: # R does not constitute right handed system R -= np.outer(u[:, 2], vh[2, :] * 2.0) s[-1] *= -1.0 # homogeneous transformation matrix M = np.identity(4) M[:3, :3] = R else: # compute symmetric matrix N xx, yy, zz = np.sum(v0 * v1, axis=1) xy, yz, zx = np.sum(v0 * np.roll(v1, -1, axis=0), axis=1) xz, yx, zy = np.sum(v0 * np.roll(v1, -2, axis=0), axis=1) N = ( (xx + yy + zz, 0.0, 0.0, 0.0), (yz - zy, xx - yy - zz, 0.0, 0.0), (zx - xz, xy + yx, -xx + yy - zz, 0.0), (xy - yx, zx + xz, yz + zy, -xx - yy + zz)) # quaternion: eigenvector corresponding to most positive eigenvalue l, V = np.linalg.eigh(N) q = V[:, np.argmax(l)] q /= vector_norm(q) # unit quaternion # homogeneous transformation matrix M = quaternion_matrix(q) # scale: ratio of rms deviations from centroid if scaling: v0 *= v0 v1 *= v1 M[:3, :3] *= 
math.sqrt(np.sum(v1) / np.sum(v0)) # translation M[:3, 3] = t1 T = np.identity(4) T[:3, 3] = -t0 M = np.dot(M, T) return M def euler_matrix(ai, aj, ak, axes='sxyz'): """Return homogeneous rotation matrix from Euler angles and axis sequence. ai, aj, ak : Euler's roll, pitch and yaw angles axes : One of 24 axis sequences as string or encoded tuple >>> R = euler_matrix(1, 2, 3, 'syxz') >>> np.allclose(np.sum(R[0]), -1.34786452) True >>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1)) >>> np.allclose(np.sum(R[0]), -0.383436184) True >>> ai, aj, ak = (4.0*math.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R = euler_matrix(ai, aj, ak, axes) >>> for axes in _TUPLE2AXES.keys(): ... R = euler_matrix(ai, aj, ak, axes) """ try: firstaxis, parity, repetition, frame = _AXES2TUPLE[axes] except (AttributeError, KeyError): _ = _TUPLE2AXES[axes] firstaxis, parity, repetition, frame = axes i = firstaxis j = _NEXT_AXIS[i + parity] k = _NEXT_AXIS[i - parity + 1] if frame: ai, ak = ak, ai if parity: ai, aj, ak = -ai, -aj, -ak si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak) ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak) cc, cs = ci * ck, ci * sk sc, ss = si * ck, si * sk M = np.identity(4) if repetition: M[i, i] = cj M[i, j] = sj * si M[i, k] = sj * ci M[j, i] = sj * sk M[j, j] = -cj * ss + cc M[j, k] = -cj * cs - sc M[k, i] = -sj * ck M[k, j] = cj * sc + cs M[k, k] = cj * cc - ss else: M[i, i] = cj * ck M[i, j] = sj * sc - cs M[i, k] = sj * cc + ss M[j, i] = cj * sk M[j, j] = sj * ss + cc M[j, k] = sj * cs - sc M[k, i] = -sj M[k, j] = cj * si M[k, k] = cj * ci return M def euler_from_matrix(matrix, axes='sxyz'): """Return Euler angles from rotation matrix for specified axis sequence. axes : One of 24 axis sequences as string or encoded tuple Note that many Euler angle triplets can describe one matrix. >>> R0 = euler_matrix(1, 2, 3, 'syxz') >>> al, be, ga = euler_from_matrix(R0, 'syxz') >>> R1 = euler_matrix(al, be, ga, 'syxz') >>> np.allclose(R0, R1) True >>> angles = (4.0*math.pi) * (np.random.random(3) - 0.5) >>> for axes in _AXES2TUPLE.keys(): ... R0 = euler_matrix(axes=axes, *angles) ... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes)) ... if not np.allclose(R0, R1): print(axes, "failed") """ try: firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] except (AttributeError, KeyError): _ = _TUPLE2AXES[axes] firstaxis, parity, repetition, frame = axes i = firstaxis j = _NEXT_AXIS[i + parity] k = _NEXT_AXIS[i - parity + 1] M = np.array(matrix, dtype=np.float64, copy=False)[:3, :3] if repetition: sy = math.sqrt(M[i, j] * M[i, j] + M[i, k] * M[i, k]) if sy > _EPS: ax = math.atan2(M[i, j], M[i, k]) ay = math.atan2(sy, M[i, i]) az = math.atan2(M[j, i], -M[k, i]) else: ax = math.atan2(-M[j, k], M[j, j]) ay = math.atan2(sy, M[i, i]) az = 0.0 else: cy = math.sqrt(M[i, i] * M[i, i] + M[j, i] * M[j, i]) if cy > _EPS: ax = math.atan2(M[k, j], M[k, k]) ay = math.atan2(-M[k, i], cy) az = math.atan2(M[j, i], M[i, i]) else: ax = math.atan2(-M[j, k], M[j, j]) ay = math.atan2(-M[k, i], cy) az = 0.0 if parity: ax, ay, az = -ax, -ay, -az if frame: ax, az = az, ax return ax, ay, az def euler_from_quaternion(quaternion, axes='sxyz'): """Return Euler angles from quaternion for specified axis sequence. 
>>> angles = euler_from_quaternion([0.99810947, 0.06146124, 0, 0]) >>> np.allclose(angles, [0.123, 0, 0]) True """ return euler_from_matrix(quaternion_matrix(quaternion), axes) def quaternion_from_euler(ai, aj, ak, axes='sxyz'): """Return quaternion from Euler angles and axis sequence. ai, aj, ak : Euler's roll, pitch and yaw angles axes : One of 24 axis sequences as string or encoded tuple >>> q = quaternion_from_euler(1, 2, 3, 'ryxz') >>> np.allclose(q, [0.435953, 0.310622, -0.718287, 0.444435]) True """ try: firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()] except (AttributeError, KeyError): _ = _TUPLE2AXES[axes] firstaxis, parity, repetition, frame = axes i = firstaxis + 1 j = _NEXT_AXIS[i + parity - 1] + 1 k = _NEXT_AXIS[i - parity] + 1 if frame: ai, ak = ak, ai if parity: aj = -aj ai /= 2.0 aj /= 2.0 ak /= 2.0 ci = math.cos(ai) si = math.sin(ai) cj = math.cos(aj) sj = math.sin(aj) ck = math.cos(ak) sk = math.sin(ak) cc = ci * ck cs = ci * sk sc = si * ck ss = si * sk quaternion = np.empty((4, ), dtype=np.float64) if repetition: quaternion[0] = cj * (cc - ss) quaternion[i] = cj * (cs + sc) quaternion[j] = sj * (cc + ss) quaternion[k] = sj * (cs - sc) else: quaternion[0] = cj * cc + sj * ss quaternion[i] = cj * sc - sj * cs quaternion[j] = cj * ss + sj * cc quaternion[k] = cj * cs - sj * sc if parity: quaternion[j] *= -1 return quaternion def quaternion_about_axis(angle, axis): """Return quaternion for rotation about axis. >>> q = quaternion_about_axis(0.123, (1, 0, 0)) >>> np.allclose(q, [0.99810947, 0.06146124, 0, 0]) True """ quaternion = np.zeros((4, ), dtype=np.float64) quaternion[1] = axis[0] quaternion[2] = axis[1] quaternion[3] = axis[2] qlen = vector_norm(quaternion) if qlen > _EPS: quaternion *= math.sin(angle / 2.0) / qlen quaternion[0] = math.cos(angle / 2.0) return quaternion def quaternion_matrix(quaternion): """Return homogeneous rotation matrix from quaternion. >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0]) >>> np.allclose(M, rotation_matrix(0.123, (1, 0, 0))) True >>> M = quaternion_matrix([1, 0, 0, 0]) >>> np.allclose(M, identity_matrix()) True >>> M = quaternion_matrix([0, 1, 0, 0]) >>> np.allclose(M, np.diag([1, -1, -1, 1])) True """ q = np.array(quaternion[:4], dtype=np.float64, copy=True) nq = np.dot(q, q) if nq < _EPS: return np.identity(4) q *= math.sqrt(2.0 / nq) q = np.outer(q, q) return np.array( ( (1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0], 0.0), (q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0], 0.0), (q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2], 0.0), (0.0, 0.0, 0.0, 1.0) ), dtype=np.float64) def quaternion_from_matrix(matrix, isprecise=False): """Return quaternion from rotation matrix. If isprecise=True, the input matrix is assumed to be a precise rotation matrix and a faster algorithm is used. >>> q = quaternion_from_matrix(identity_matrix(), True) >>> np.allclose(q, [1., 0., 0., 0.]) True >>> q = quaternion_from_matrix(np.diag([1., -1., -1., 1.])) >>> np.allclose(q, [0, 1, 0, 0]) or np.allclose(q, [0, -1, 0, 0]) True >>> R = rotation_matrix(0.123, (1, 2, 3)) >>> q = quaternion_from_matrix(R, True) >>> np.allclose(q, [0.9981095, 0.0164262, 0.0328524, 0.0492786]) True >>> R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0], ... [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]] >>> q = quaternion_from_matrix(R) >>> np.allclose(q, [0.19069, 0.43736, 0.87485, -0.083611]) True >>> R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0], ... 
[-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]] >>> q = quaternion_from_matrix(R) >>> np.allclose(q, [0.82336615, -0.13610694, 0.46344705, -0.29792603]) True >>> R = random_rotation_matrix() >>> q = quaternion_from_matrix(R) >>> is_same_transform(R, quaternion_matrix(q)) True """ M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4] if isprecise: q = np.empty((4, ), dtype=np.float64) t = np.trace(M) if t > M[3, 3]: q[0] = t q[3] = M[1, 0] - M[0, 1] q[2] = M[0, 2] - M[2, 0] q[1] = M[2, 1] - M[1, 2] else: i, j, k = 1, 2, 3 if M[1, 1] > M[0, 0]: i, j, k = 2, 3, 1 if M[2, 2] > M[i, i]: i, j, k = 3, 1, 2 t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3] q[i] = t q[j] = M[i, j] + M[j, i] q[k] = M[k, i] + M[i, k] q[3] = M[k, j] - M[j, k] q *= 0.5 / math.sqrt(t * M[3, 3]) else: m00 = M[0, 0] m01 = M[0, 1] m02 = M[0, 2] m10 = M[1, 0] m11 = M[1, 1] m12 = M[1, 2] m20 = M[2, 0] m21 = M[2, 1] m22 = M[2, 2] # symmetric matrix K K = np.array(( (m00 - m11 - m22, 0.0, 0.0, 0.0), (m01 + m10, m11 - m00 - m22, 0.0, 0.0), (m02 + m20, m12 + m21, m22 - m00 - m11, 0.0), (m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22))) K /= 3.0 # quaternion is eigenvector of K that corresponds to largest eigenvalue l, V = np.linalg.eigh(K) q = V[[3, 0, 1, 2], np.argmax(l)] if q[0] < 0.0: q *= -1.0 return q def quaternion_multiply(quaternion1, quaternion0): """Return multiplication of two quaternions. >>> q = quaternion_multiply([4, 1, -2, 3], [8, -5, 6, 7]) >>> np.allclose(q, [28, -44, -14, 48]) True """ w0, x0, y0, z0 = quaternion0 w1, x1, y1, z1 = quaternion1 return np.array( ( -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0, x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0, -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0, x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0), dtype=np.float64) def quaternion_conjugate(quaternion): """Return conjugate of quaternion. >>> q0 = random_quaternion() >>> q1 = quaternion_conjugate(q0) >>> q1[0] == q0[0] and all(q1[1:] == -q0[1:]) True """ return np.array( ( quaternion[0], -quaternion[1], -quaternion[2], -quaternion[3]), dtype=np.float64) def quaternion_inverse(quaternion): """Return inverse of quaternion. >>> q0 = random_quaternion() >>> q1 = quaternion_inverse(q0) >>> np.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0]) True """ return quaternion_conjugate(quaternion) / np.dot(quaternion, quaternion) def quaternion_real(quaternion): """Return real part of quaternion. >>> quaternion_real([3.0, 0.0, 1.0, 2.0]) 3.0 """ return quaternion[0] def quaternion_imag(quaternion): """Return imaginary part of quaternion. >>> quaternion_imag([3.0, 0.0, 1.0, 2.0]) [0.0, 1.0, 2.0] """ return quaternion[1:4] def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True): r"""Return spherical linear interpolation between two quaternions. 
>>> q0 = random_quaternion() >>> q1 = random_quaternion() >>> q = quaternion_slerp(q0, q1, 0.0) >>> np.allclose(q, q0) True >>> q = quaternion_slerp(q0, q1, 1.0, 1) >>> np.allclose(q, q1) True >>> q = quaternion_slerp(q0, q1, 0.5) >>> angle = math.acos(np.dot(q0, q)) >>> np.allclose(2.0, math.acos(np.dot(q0, q1)) / angle) or \ np.allclose(2.0, math.acos(-np.dot(q0, q1)) / angle) True """ q0 = unit_vector(quat0[:4]) q1 = unit_vector(quat1[:4]) if fraction == 0.0: return q0 elif fraction == 1.0: return q1 d = np.dot(q0, q1) if abs(abs(d) - 1.0) < _EPS: return q0 if shortestpath and d < 0.0: # invert rotation d = -d q1 *= -1.0 angle = math.acos(d) + spin * math.pi if abs(angle) < _EPS: return q0 isin = 1.0 / math.sin(angle) q0 *= math.sin((1.0 - fraction) * angle) * isin q1 *= math.sin(fraction * angle) * isin q0 += q1 return q0 def random_quaternion(rand=None): """Return uniform random unit quaternion. rand: array like or None Three independent random variables that are uniformly distributed between 0 and 1. >>> q = random_quaternion() >>> np.allclose(1.0, vector_norm(q)) True >>> q = random_quaternion(np.random.random(3)) >>> len(q.shape), q.shape[0]==4 (1, True) """ if rand is None: rand = np.random.rand(3) else: assert len(rand) == 3 r1 = np.sqrt(1.0 - rand[0]) r2 = np.sqrt(rand[0]) pi2 = math.pi * 2.0 t1 = pi2 * rand[1] t2 = pi2 * rand[2] return np.array( ( np.cos(t2) * r2, np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2), dtype=np.float64) def random_rotation_matrix(rand=None): """Return uniform random rotation matrix. rnd: array like Three independent random variables that are uniformly distributed between 0 and 1 for each returned quaternion. >>> R = random_rotation_matrix() >>> np.allclose(np.dot(R.T, R), np.identity(4)) True """ return quaternion_matrix(random_quaternion(rand)) class Arcball(object): """Virtual Trackball Control. >>> ball = Arcball() >>> ball = Arcball(initial=np.identity(4)) >>> ball.place([320, 320], 320) >>> ball.down([500, 250]) >>> ball.drag([475, 275]) >>> R = ball.matrix() >>> np.allclose(np.sum(R), 3.90583455) True >>> ball = Arcball(initial=[1, 0, 0, 0]) >>> ball.place([320, 320], 320) >>> ball.setaxes([1,1,0], [-1, 1, 0]) >>> ball.setconstrain(True) >>> ball.down([400, 200]) >>> ball.drag([200, 400]) >>> R = ball.matrix() >>> np.allclose(np.sum(R), 0.2055924) True >>> ball.next() """ def __init__(self, initial=None): """Initialize virtual trackball control. initial : quaternion or rotation matrix """ self._axis = None self._axes = None self._radius = 1.0 self._center = [0.0, 0.0] self._vdown = np.array([0, 0, 1], dtype=np.float64) self._constrain = False if initial is None: self._qdown = np.array([1, 0, 0, 0], dtype=np.float64) else: initial = np.array(initial, dtype=np.float64) if initial.shape == (4, 4): self._qdown = quaternion_from_matrix(initial) elif initial.shape == (4, ): initial /= vector_norm(initial) self._qdown = initial else: raise ValueError("initial not a quaternion or matrix") self._qnow = self._qpre = self._qdown def place(self, center, radius): """Place Arcball, e.g. when window size changes. center : sequence[2] Window coordinates of trackball center. radius : float Radius of trackball in window coordinates. 
""" self._radius = float(radius) self._center[0] = center[0] self._center[1] = center[1] def setaxes(self, *axes): """Set axes to constrain rotations.""" if axes is None: self._axes = None else: self._axes = [unit_vector(axis) for axis in axes] def setconstrain(self, constrain): """Set state of constrain to axis mode.""" self._constrain = constrain is True def getconstrain(self): """Return state of constrain to axis mode.""" return self._constrain def down(self, point): """Set initial cursor window coordinates and pick constrain-axis.""" self._vdown = arcball_map_to_sphere(point, self._center, self._radius) self._qdown = self._qpre = self._qnow if self._constrain and self._axes is not None: self._axis = arcball_nearest_axis(self._vdown, self._axes) self._vdown = arcball_constrain_to_axis(self._vdown, self._axis) else: self._axis = None def drag(self, point): """Update current cursor window coordinates.""" vnow = arcball_map_to_sphere(point, self._center, self._radius) if self._axis is not None: vnow = arcball_constrain_to_axis(vnow, self._axis) self._qpre = self._qnow t = np.cross(self._vdown, vnow) if np.dot(t, t) < _EPS: self._qnow = self._qdown else: q = [np.dot(self._vdown, vnow), t[0], t[1], t[2]] self._qnow = quaternion_multiply(q, self._qdown) def next(self, acceleration=0.0): """Continue rotation in direction of last drag.""" q = quaternion_slerp(self._qpre, self._qnow, 2.0 + acceleration, False) self._qpre, self._qnow = self._qnow, q def matrix(self): """Return homogeneous rotation matrix.""" return quaternion_matrix(self._qnow) def arcball_map_to_sphere(point, center, radius): """Return unit sphere coordinates from window coordinates.""" v = np.array( ( (point[0] - center[0]) / radius, (center[1] - point[1]) / radius, 0.0 ), dtype=np.float64 ) n = v[0] * v[0] + v[1] * v[1] if n > 1.0: v /= math.sqrt(n) # position outside of sphere else: v[2] = math.sqrt(1.0 - n) return v def arcball_constrain_to_axis(point, axis): """Return sphere point perpendicular to axis.""" v = np.array(point, dtype=np.float64, copy=True) a = np.array(axis, dtype=np.float64, copy=True) v -= a * np.dot(a, v) # on plane n = vector_norm(v) if n > _EPS: if v[2] < 0.0: v *= -1.0 v /= n return v if a[2] == 1.0: return np.array([1, 0, 0], dtype=np.float64) return unit_vector([-a[1], a[0], 0]) def arcball_nearest_axis(point, axes): """Return axis, which arc is nearest to point.""" point = np.array(point, dtype=np.float64, copy=False) nearest = None mx = -1.0 for axis in axes: t = np.dot(arcball_constrain_to_axis(point, axis), point) if t > mx: nearest = axis mx = t return nearest # epsilon for testing whether a number is close to zero _EPS = np.finfo(float).eps * 4.0 # axis sequences for Euler angles _NEXT_AXIS = [1, 2, 0, 1] # map axes strings to/from tuples of inner axis, parity, repetition, frame _AXES2TUPLE = { 'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0), 'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0), 'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0), 'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0), 'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1), 'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1), 'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1), 'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)} _TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items()) def vector_norm(data, axis=None, out=None): """Return length, i.e. eucledian norm, of ndarray along axis. 
>>> v = np.random.random(3) >>> n = vector_norm(v) >>> np.allclose(n, np.linalg.norm(v)) True >>> v = np.random.rand(6, 5, 3) >>> n = vector_norm(v, axis=-1) >>> np.allclose(n, np.sqrt(np.sum(v*v, axis=2))) True >>> n = vector_norm(v, axis=1) >>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1))) True >>> v = np.random.rand(5, 4, 3) >>> n = np.empty((5, 3), dtype=np.float64) >>> vector_norm(v, axis=1, out=n) >>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1))) True >>> vector_norm([]) 0.0 >>> vector_norm([1.0]) 1.0 """ data = np.array(data, dtype=np.float64, copy=True) if out is None: if data.ndim == 1: return math.sqrt(np.dot(data, data)) data *= data out = np.atleast_1d(np.sum(data, axis=axis)) np.sqrt(out, out) return out else: data *= data np.sum(data, axis=axis, out=out) np.sqrt(out, out) def unit_vector(data, axis=None, out=None): """Return ndarray normalized by length, i.e. eucledian norm, along axis. >>> v0 = np.random.random(3) >>> v1 = unit_vector(v0) >>> np.allclose(v1, v0 / np.linalg.norm(v0)) True >>> v0 = np.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=2)), 2) >>> np.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0*v0, axis=1)), 1) >>> np.allclose(v1, v2) True >>> v1 = np.empty((5, 4, 3), dtype=np.float64) >>> unit_vector(v0, axis=1, out=v1) >>> np.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0] """ if out is None: data = np.array(data, dtype=np.float64, copy=True) if data.ndim == 1: data /= math.sqrt(np.dot(data, data)) return data else: if out is not data: out[:] = np.array(data, copy=False) data = out length = np.atleast_1d(np.sum(data * data, axis)) np.sqrt(length, length) if axis is not None: length = np.expand_dims(length, axis) data /= length if out is None: return data def random_vector(size): """Return array of random doubles in the half-open interval [0.0, 1.0). >>> v = random_vector(10000) >>> np.all(v >= 0.0) and np.all(v < 1.0) True >>> v0 = random_vector(10) >>> v1 = random_vector(10) >>> np.any(v0 == v1) False """ return np.random.random(size) def inverse_matrix(matrix): """Return inverse of square transformation matrix. >>> M0 = random_rotation_matrix() >>> M1 = inverse_matrix(M0.T) >>> np.allclose(M1, np.linalg.inv(M0.T)) True >>> for size in range(1, 7): ... M0 = np.random.rand(size, size) ... M1 = inverse_matrix(M0) ... if not np.allclose(M1, np.linalg.inv(M0)): print(size) """ return np.linalg.inv(matrix) def concatenate_matrices(*matrices): """Return concatenation of series of transformation matrices. >>> M = np.random.rand(16).reshape((4, 4)) - 0.5 >>> np.allclose(M, concatenate_matrices(M)) True >>> np.allclose(np.dot(M, M.T), concatenate_matrices(M, M.T)) True """ M = np.identity(4) for i in matrices: M = np.dot(M, i) return M def is_same_transform(matrix0, matrix1): """Return True if two matrices perform same transformation. >>> is_same_transform(np.identity(4), np.identity(4)) True >>> is_same_transform(np.identity(4), random_rotation_matrix()) False """ matrix0 = np.array(matrix0, dtype=np.float64, copy=True) matrix0 /= matrix0[3, 3] matrix1 = np.array(matrix1, dtype=np.float64, copy=True) matrix1 /= matrix1[3, 3] return np.allclose(matrix0, matrix1) def _import_module(module_name, warn=True, prefix='_py_', ignore='_'): """Try import all public attributes from module into global namespace. Existing attributes with name clashes are renamed with prefix. 
Attributes starting with underscore are ignored by default. Return True on successful import. """ sys.path.append(os.path.dirname(__file__)) try: module = __import__(module_name) except ImportError: sys.path.pop() if warn: warnings.warn("failed to import module " + module_name) else: sys.path.pop() for attr in dir(module): if ignore and attr.startswith(ignore): continue if prefix: if attr in globals(): globals()[prefix + attr] = globals()[attr] elif warn: warnings.warn("no Python implementation of " + attr) globals()[attr] = getattr(module, attr) return True # orbeckst --- some simple geometry def rotaxis(a, b): """Return the rotation axis to rotate vector a into b. Parameters ---------- a, b : array_like two vectors Returns ------- c : np.ndarray vector to rotate a into b Note ---- If a == b this will always return [1, 0, 0] """ if np.allclose(a, b): return np.array([1, 0, 0]) c = np.cross(a, b) return c / np.linalg.norm(c) _import_module('_transformations') # Documentation in HTML format can be generated with Epydoc __docformat__ = "restructuredtext en"
MDAnalysis/mdanalysis
package/MDAnalysis/lib/transformations.py
Python
gpl-2.0
60,776
[ "MDAnalysis" ]
70d05b38071f8026ea8b7d1124b9837817e5d54fded376c33740733441f1b22d
#!/usr/bin/env python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyStanfordDependencies ====================== Python interface for converting `Penn Treebank <http://www.cis.upenn.edu/~treebank/>`_ trees to `Universal Dependencies <http://universaldependencies.github.io/docs/>`_ and `Stanford Dependencies <http://nlp.stanford.edu/software/stanford-dependencies.shtml>`_. Example usage ------------- Start by getting a ``StanfordDependencies`` instance with ``StanfordDependencies.get_instance()``:: >>> import StanfordDependencies >>> sd = StanfordDependencies.get_instance(backend='subprocess') ``get_instance()`` takes several options. ``backend`` can currently be ``subprocess`` or ``jpype`` (see below). If you have an existing `Stanford CoreNLP <http://nlp.stanford.edu/software/corenlp.shtml>`_ or `Stanford Parser <http://nlp.stanford.edu/software/lex-parser.shtml>`_ jar file, use the ``jar_filename`` parameter to point to the full path of the jar file. Otherwise, PyStanfordDependencies will download a jar file for you and store it in locally (``~/.local/share/pystanforddeps``). You can request a specific version with the ``version`` flag, e.g., ``version='3.4.1'``. To convert trees, use the ``convert_trees()`` or ``convert_tree()`` method (note that by default, ``convert_trees()`` can be considerably faster if you're doing batch conversion). These return a sentence (list of ``Token`` objects) or a list of sentences (list of list of ``Token`` objects) respectively:: >>> sent = sd.convert_tree('(S1 (NP (DT some) (JJ blue) (NN moose)))') >>> for token in sent: ... print token ... Token(index=1, form='some', cpos='DT', pos='DT', head=3, deprel='det') Token(index=2, form='blue', cpos='JJ', pos='JJ', head=3, deprel='amod') Token(index=3, form='moose', cpos='NN', pos='NN', head=0, deprel='root') This tells you that ``moose`` is the head of the sentence and is modified by ``some`` (with a ``det`` = determiner relation) and ``blue`` (with an ``amod`` = adjective modifier relation). Fields on ``Token`` objects are readable as attributes. See docs for additional options in ``convert_tree()`` and ``convert_trees()``. Visualization ------------- If you have the `asciitree <https://pypi.python.org/pypi/asciitree>`_ package, you can use a prettier ASCII formatter:: >>> print sent.as_asciitree() moose [root] +-- some [det] +-- blue [amod] If you have Python 2.7 or later, you can use `Graphviz <http://graphviz.org/>`_ to render your graphs. 
You'll need the `Python graphviz <https://pypi.python.org/pypi/graphviz>`_ package to call ``as_dotgraph()``:: >>> dotgraph = sent.as_dotgraph() >>> print dotgraph digraph { 0 [label=root] 1 [label=some] 3 -> 1 [label=det] 2 [label=blue] 3 -> 2 [label=amod] 3 [label=moose] 0 -> 3 [label=root] } >>> dotgraph.render('moose') # renders a PDF by default 'moose.pdf' >>> dotgraph.format = 'svg' >>> dotgraph.render('moose') 'moose.svg' The Python `xdot <https://pypi.python.org/pypi/xdot>`_ package provides an interactive visualization:: >>> import xdot >>> window = xdot.DotWindow() >>> window.set_dotcode(dotgraph.source) Both ``as_asciitree()`` and ``as_dotgraph()`` allow customization. See the docs for additional options. Backends -------- Currently PyStanfordDependencies includes two backends: - ``subprocess`` (works anywhere with a ``java`` binary, but more overhead so batched conversions with ``convert_trees()`` are recommended) - ``jpype`` (requires `jpype1 <https://pypi.python.org/pypi/JPype1>`_, faster than the subprocess backend, also includes access to the Stanford CoreNLP lemmatizer) By default, PyStanfordDependencies will attempt to use the ``jpype`` backend. If ``jpype`` isn't available or crashes on startup, PyStanfordDependencies will fallback to ``subprocess`` with a warning. Universal Dependencies status ----------------------------- PyStanfordDependencies supports most features in `Universal Dependencies <http://universaldependencies.github.io/docs/>`_ (see `issue #10 <https://github.com/dmcc/PyStanfordDependencies/issues/10>`_ for the most up to date status). PyStanfordDependencies output matches Universal Dependencies in terms of structure and dependency labels, but Universal POS tags and features are missing. Currently, PyStanfordDependencies will output Universal Dependencies by default (unless you're using Stanford CoreNLP 3.5.1 or earlier). Related projects ---------------- - `clearnlp-converter <https://pypi.python.org/pypi/clearnlp-converter/>`_ (uses `clearnlp <http://www.clearnlp.com/>`_ instead of `Stanford CoreNLP <http://nlp.stanford.edu/software/corenlp.shtml>`_ for dependency conversion) More information ---------------- Licensed under `Apache 2.0 <http://www.apache.org/licenses/LICENSE-2.0>`_. 
Written by David McClosky (`homepage <http://nlp.stanford.edu/~mcclosky/>`_,
`code <http://github.com/dmcc>`_)

Bug reports and feature requests: `GitHub issue tracker
<http://github.com/dmcc/PyStanfordDependencies/issues>`_

Release summaries
-----------------
- 0.3.1 (2015.11.02): Better collapsed universal handling, bugfixes
- 0.3.0 (2015.10.09): Support copy nodes, more input checking/debugging help,
  example ``convert.py`` program
- 0.2.0 (2015.08.02): Universal Dependencies support (mostly), Python 3
  support (fully), minor API updates
- 0.1.7 (2015.06.13): Bugfixes for ``JPype``, handle version mismatches in
  IBM Java
- 0.1.6 (2015.02.12): Support for ``graphviz`` formatting, CoreNLP 3.5.1,
  better Windows portability
- 0.1.5 (2015.01.10): Support for ASCII tree formatting
- 0.1.4 (2015.01.07): Fix ``CCprocessed`` support
- 0.1.3 (2015.01.03): Bugfixes, coveralls integration, refactoring
- 0.1.2 (2015.01.02): Better CoNLL structures, test suite and Travis CI
  support, bugfixes
- 0.1.1 (2014.12.15): More docs, fewer bugs
- 0.1 (2014.12.14): Initial release
"""

from .StanfordDependencies import (StanfordDependencies, get_instance,
                                   JavaRuntimeVersionError)
from .CoNLL import Corpus, Sentence, Token

__all__ = ('StanfordDependencies', 'get_instance', 'JavaRuntimeVersionError',
           'Corpus', 'Sentence', 'Token')

__authors__ = 'David McClosky'
__license__ = 'Apache 2.0'
__version__ = '0.3.1'
__email__ = 'notsoweird+pystanforddependencies@gmail.com'
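# A minimal usage sketch of batch conversion with convert_trees(), which the
# module docstring above notes can be considerably faster than calling
# convert_tree() repeatedly. The backend choice and the bracketed parse
# strings below are illustrative assumptions, not fixtures shipped with the
# package.
#
# >>> import StanfordDependencies
# >>> sd = StanfordDependencies.get_instance(backend='subprocess')
# >>> sentences = sd.convert_trees(['(S1 (NP (DT a) (NN dog)))',
# ...                               '(S1 (NP (DT a) (NN cat)))'])
# >>> len(sentences)        # one sentence (list of Token objects) per tree
# 2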
dmcc/PyStanfordDependencies
StanfordDependencies/__init__.py
Python
apache-2.0
6,989
[ "MOOSE" ]
44eb769b5a6fceb12a683ee68fdbc9c452f2bb7f7088d877a5f5b96cbd782202
import time class TestBlogPage(object): def test_blog_page_home(self, browser, site_url): browser.visit(site_url + '/blog/') assert browser.status_code == 200 assert browser.is_text_present('Blog') def test_blog_page_click_search(self, browser, site_url): browser.visit(site_url + '/blog/') browser.fill('q', 'test') button = browser.find_by_css('.btn-default')[0] button.click() time.sleep(2) assert browser.url == site_url + '/blog/search/?q=test' assert browser.status_code == 200 assert browser.is_text_present('Entries for search') def test_blog_feed_rss(self, browser, site_url): browser.visit(site_url + '/blog/feed/') assert browser.status_code == 200
APSL/puput
tests/functional/test_blog_page.py
Python
mit
781
[ "VisIt" ]
8afa2b10675bd7dea2f4c320927e85bc1d67ae82688c6b6f2f1ce390b4de3a2c
# # Copyright (C) 2013,2014,2015,2016 The ESPResSo project # # This file is part of ESPResSo. # # ESPResSo is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ESPResSo is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import print_function import espressomd._system as es import espressomd from espressomd import thermostat from espressomd import code_info from espressomd import electrostatics from espressomd import electrostatic_extensions import numpy print(""" ======================================================= = p3m.py = ======================================================= Program Information:""") print(code_info.features()) dev = "cpu" # System parameters ############################################################# # 10 000 Particles box_l = 10.7437 density = 0.7 # Interaction parameters (repulsive Lennard Jones) ############################################################# lj_eps = 1.0 lj_sig = 1.0 lj_cut = 1.12246 lj_cap = 20 # Integration parameters ############################################################# system = espressomd.System() system.time_step = 0.01 system.cell_system.skin = 0.4 #es._espressoHandle.Tcl_Eval('thermostat langevin 1.0 1.0') thermostat.Thermostat().set_langevin(1.0, 1.0) # warmup integration (with capped LJ potential) warm_steps = 100 warm_n_times = 30 # do the warmup until the particles have at least the distance min__dist min_dist = 0.9 # integration int_steps = 1000 int_n_times = 10 ############################################################# # Setup System # ############################################################# # Interaction setup ############################################################# system.box_l = [box_l, box_l, box_l] system.non_bonded_inter[0, 0].lennard_jones.set_params( epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto") system.non_bonded_inter.set_force_cap(lj_cap) print("LJ-parameters:") print(system.non_bonded_inter[0, 0].lennard_jones.get_params()) # Particle setup ############################################################# volume = box_l * box_l * box_l n_part = int(volume * density) for i in range(n_part): system.part.add(id=i, pos=numpy.random.random(3) * system.box_l) system.analysis.distto(0) print("Simulate {} particles in a cubic simulation box {} at density {}." 
.format(n_part, box_l, density).strip()) print("Interactions:\n") act_min_dist = system.analysis.mindist() print("Start with minimal distance {}".format(act_min_dist)) system.cell_system.max_num_cells = 2744 # Assingn charge to particles for i in range(n_part / 2 - 1): system.part[2 * i].q = -1.0 system.part[2 * i + 1].q = 1.0 # P3M setup after charge assigned ############################################################# print("\nSCRIPT--->Create p3m\n") p3m = electrostatics.P3M(bjerrum_length=2.0, accuracy=1e-2) print("\nSCRIPT--->Add actor\n") system.actors.add(p3m) print("\nSCRIPT--->P3M parameter:\n") p3m_params = p3m.get_params() for key in list(p3m_params.keys()): print("{} = {}".format(key, p3m_params[key])) print("\nSCRIPT--->Explicit tune call\n") p3m._tune() print("\nSCRIPT--->P3M parameter:\n") p3m_params = p3m.get_params() for key in list(p3m_params.keys()): print("{} = {}".format(key, p3m_params[key])) # elc=electrostatic_extensions.ELC(maxPWerror=1.0,gap_size=1.0) # system.actors.add(elc) print(system.actors) ############################################################# # Warmup Integration # ############################################################# # open Observable file obs_file = open("pylj_liquid.obs", "w") obs_file.write("# Time\tE_tot\tE_kin\tE_pot\n") print(""" Start warmup integration: At maximum {} times {} steps Stop if minimal distance is larger than {} """.strip().format(warm_n_times, warm_steps, min_dist)) # set LJ cap lj_cap = 20 system.non_bonded_inter.set_force_cap(lj_cap) print(system.non_bonded_inter[0, 0].lennard_jones) # Warmup Integration Loop i = 0 while (i < warm_n_times and act_min_dist < min_dist): system.integrator.run(warm_steps) # Warmup criterion act_min_dist = system.analysis.mindist() i += 1 # Increase LJ cap lj_cap = lj_cap + 10 system.non_bonded_inter.set_force_cap(lj_cap) # Just to see what else we may get from the c code import pprint pprint.pprint(system.cell_system.get_state(), width=1) # pprint.pprint(system.part.__getstate__(), width=1) pprint.pprint(system.__getstate__(), width=1) # write parameter file set_file = open("pylj_liquid.set", "w") set_file.write("box_l %s\ntime_step %s\nskin %s\n" % (box_l, system.time_step, system.cell_system.skin)) ############################################################# # Integration # ############################################################# print("\nStart integration: run %d times %d steps" % (int_n_times, int_steps)) # remove force capping lj_cap = 0 system.non_bonded_inter.set_force_cap(lj_cap) print(system.non_bonded_inter[0, 0].lennard_jones) # print(initial energies) energies = system.analysis.energy() print(energies) j = 0 for i in range(0, int_n_times): print("run %d at time=%f " % (i, system.time)) system.integrator.run(int_steps) energies = system.analysis.energy() print(energies) obs_file.write('{ time %s } %s\n' % (system.time, energies)) # write end configuration end_file = open("pylj_liquid.end", "w") end_file.write("{ time %f } \n { box_l %f }\n" % (system.time, box_l)) end_file.write("{ particles {id pos type} }") for i in range(n_part): end_file.write("%s\n" % system.part[i].pos) obs_file.close() set_file.close() end_file.close() # terminate program print("\nFinished.")
lahnerml/espresso
samples/python/p3m.py
Python
gpl-3.0
6384
[ "ESPResSo" ]
0a8adcc1798f7a0b2614f6912b0a93010a5e4dce9f01e04b522d62a054b292f8
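The charge-assignment loop in the sample above uses range(n_part / 2 - 1), which stops one pair short of the end and relies on Python 2 integer division. As a hedged illustration, reusing only the system and n_part names from the script, an alternating assignment that covers every pair and reports the net charge looks like this:

# Assign alternating unit charges pairwise; integer division keeps this valid on Python 3.
# If n_part happens to be odd, the final particle simply stays neutral.
for i in range(n_part // 2):
    system.part[2 * i].q = -1.0
    system.part[2 * i + 1].q = 1.0

total_charge = sum(system.part[i].q for i in range(n_part))
print("net charge: {}".format(total_charge))  # P3M expects an (almost) neutral system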
r"""OS routines for NT or Posix depending on what system we're on. This exports: - all functions from posix, nt or ce, e.g. unlink, stat, etc. - os.path is either posixpath or ntpath - os.name is either 'posix', 'nt' or 'ce'. - os.curdir is a string representing the current directory ('.' or ':') - os.pardir is a string representing the parent directory ('..' or '::') - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') - os.extsep is the extension separator (always '.') - os.altsep is the alternate pathname separator (None or '/') - os.pathsep is the component separator used in $PATH etc - os.linesep is the line separator in text files ('\r' or '\n' or '\r\n') - os.defpath is the default search path for executables - os.devnull is the file path of the null device ('/dev/null', etc.) Programs that import and use 'os' stand a better chance of being portable between different platforms. Of course, they must then only use functions that are defined by all platforms (e.g., unlink and opendir), and leave all pathname manipulation to os.path (e.g., split and join). """ #' import sys, errno import stat as st _names = sys.builtin_module_names # Note: more names are added to __all__ later. __all__ = ["altsep", "curdir", "pardir", "sep", "pathsep", "linesep", "defpath", "name", "path", "devnull", "SEEK_SET", "SEEK_CUR", "SEEK_END", "fsencode", "fsdecode", "get_exec_path", "fdopen", "extsep"] def _exists(name): return name in globals() def _get_exports_list(module): try: return list(module.__all__) except AttributeError: return [n for n in dir(module) if n[0] != '_'] # Any new dependencies of the os module and/or changes in path separator # requires updating importlib as well. if 'posix' in _names: name = 'posix' linesep = '\n' from posix import * try: from posix import _exit __all__.append('_exit') except ImportError: pass import posixpath as path try: from posix import _have_functions except ImportError: pass import posix __all__.extend(_get_exports_list(posix)) del posix elif 'nt' in _names: name = 'nt' linesep = '\r\n' from nt import * try: from nt import _exit __all__.append('_exit') except ImportError: pass import ntpath as path import nt __all__.extend(_get_exports_list(nt)) del nt try: from nt import _have_functions except ImportError: pass elif 'ce' in _names: name = 'ce' linesep = '\r\n' from ce import * try: from ce import _exit __all__.append('_exit') except ImportError: pass # We can use the standard Windows path. 
import ntpath as path import ce __all__.extend(_get_exports_list(ce)) del ce try: from ce import _have_functions except ImportError: pass elif 'uwp_os' in _names: name = 'uwp_os' linesep = '\r\n' from uwp_os import * try: from uwp_os import _exit __all__.append('_exit') except ImportError: pass import ntpath as path import uwp_os __all__.extend(_get_exports_list(uwp_os)) del uwp_os try: from uwp_os import _have_functions except ImportError: pass else: raise ImportError('no os specific module found') sys.modules['os.path'] = path from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep, devnull) del _names if _exists("_have_functions"): _globals = globals() def _add(str, fn): if (fn in _globals) and (str in _have_functions): _set.add(_globals[fn]) _set = set() _add("HAVE_FACCESSAT", "access") _add("HAVE_FCHMODAT", "chmod") _add("HAVE_FCHOWNAT", "chown") _add("HAVE_FSTATAT", "stat") _add("HAVE_FUTIMESAT", "utime") _add("HAVE_LINKAT", "link") _add("HAVE_MKDIRAT", "mkdir") _add("HAVE_MKFIFOAT", "mkfifo") _add("HAVE_MKNODAT", "mknod") _add("HAVE_OPENAT", "open") _add("HAVE_READLINKAT", "readlink") _add("HAVE_RENAMEAT", "rename") _add("HAVE_SYMLINKAT", "symlink") _add("HAVE_UNLINKAT", "unlink") _add("HAVE_UNLINKAT", "rmdir") _add("HAVE_UTIMENSAT", "utime") supports_dir_fd = _set _set = set() _add("HAVE_FACCESSAT", "access") supports_effective_ids = _set _set = set() _add("HAVE_FCHDIR", "chdir") _add("HAVE_FCHMOD", "chmod") _add("HAVE_FCHOWN", "chown") _add("HAVE_FDOPENDIR", "listdir") _add("HAVE_FEXECVE", "execve") _set.add(stat) # fstat always works _add("HAVE_FTRUNCATE", "truncate") _add("HAVE_FUTIMENS", "utime") _add("HAVE_FUTIMES", "utime") _add("HAVE_FPATHCONF", "pathconf") if _exists("statvfs") and _exists("fstatvfs"): # mac os x10.3 _add("HAVE_FSTATVFS", "statvfs") supports_fd = _set _set = set() _add("HAVE_FACCESSAT", "access") # Some platforms don't support lchmod(). Often the function exists # anyway, as a stub that always returns ENOSUP or perhaps EOPNOTSUPP. # (No, I don't know why that's a good design.) ./configure will detect # this and reject it--so HAVE_LCHMOD still won't be defined on such # platforms. This is Very Helpful. # # However, sometimes platforms without a working lchmod() *do* have # fchmodat(). (Examples: Linux kernel 3.2 with glibc 2.15, # OpenIndiana 3.x.) And fchmodat() has a flag that theoretically makes # it behave like lchmod(). So in theory it would be a suitable # replacement for lchmod(). But when lchmod() doesn't work, fchmodat()'s # flag doesn't work *either*. Sadly ./configure isn't sophisticated # enough to detect this condition--it only determines whether or not # fchmodat() minimally works. # # Therefore we simply ignore fchmodat() when deciding whether or not # os.chmod supports follow_symlinks. Just checking lchmod() is # sufficient. After all--if you have a working fchmodat(), your # lchmod() almost certainly works too. 
# # _add("HAVE_FCHMODAT", "chmod") _add("HAVE_FCHOWNAT", "chown") _add("HAVE_FSTATAT", "stat") _add("HAVE_LCHFLAGS", "chflags") _add("HAVE_LCHMOD", "chmod") if _exists("lchown"): # mac os x10.3 _add("HAVE_LCHOWN", "chown") _add("HAVE_LINKAT", "link") _add("HAVE_LUTIMES", "utime") _add("HAVE_LSTAT", "stat") _add("HAVE_FSTATAT", "stat") _add("HAVE_UTIMENSAT", "utime") _add("MS_WINDOWS", "stat") supports_follow_symlinks = _set del _set del _have_functions del _globals del _add # Python uses fixed values for the SEEK_ constants; they are mapped # to native constants if necessary in posixmodule.c # Other possible SEEK values are directly imported from posixmodule.c SEEK_SET = 0 SEEK_CUR = 1 SEEK_END = 2 # Super directory utilities. # (Inspired by Eric Raymond; the doc strings are mostly his) def makedirs(name, mode=0o777, exist_ok=False): """makedirs(name [, mode=0o777][, exist_ok=False]) Super-mkdir; create a leaf directory and all intermediate ones. Works like mkdir, except that any intermediate path segment (not just the rightmost) will be created if it does not exist. If the target directory already exists, raise an OSError if exist_ok is False. Otherwise no exception is raised. This is recursive. """ head, tail = path.split(name) if not tail: head, tail = path.split(head) if head and tail and not path.exists(head): try: makedirs(head, mode, exist_ok) except FileExistsError: # be happy if someone already created the path pass cdir = curdir if isinstance(tail, bytes): cdir = bytes(curdir, 'ASCII') if tail == cdir: # xxx/newdir/. exists if xxx/newdir exists return try: mkdir(name, mode) except OSError as e: if not exist_ok or e.errno != errno.EEXIST or not path.isdir(name): raise def removedirs(name): """removedirs(name) Super-rmdir; remove a leaf directory and all empty intermediate ones. Works like rmdir except that, if the leaf directory is successfully removed, directories corresponding to rightmost path segments will be pruned away until either the whole path is consumed or an error occurs. Errors during this latter phase are ignored -- they generally mean that a directory was not empty. """ rmdir(name) head, tail = path.split(name) if not tail: head, tail = path.split(head) while head and tail: try: rmdir(head) except OSError: break head, tail = path.split(head) def renames(old, new): """renames(old, new) Super-rename; create directories as necessary and delete any left empty. Works like rename, except creation of any intermediate directories needed to make the new pathname good is attempted first. After the rename, directories corresponding to rightmost path segments of the old name will be pruned until either the whole path is consumed or a nonempty directory is found. Note: this function can fail with the new directory structure made if you lack permissions needed to unlink the leaf directory or file. """ head, tail = path.split(new) if head and tail and not path.exists(head): makedirs(head) rename(old, new) head, tail = path.split(old) if head and tail: try: removedirs(head) except OSError: pass __all__.extend(["makedirs", "removedirs", "renames"]) def walk(top, topdown=True, onerror=None, followlinks=False): """Directory tree generator. For each directory in the directory tree rooted at top (including top itself, but excluding '.' and '..'), yields a 3-tuple dirpath, dirnames, filenames dirpath is a string, the path to the directory. dirnames is a list of the names of the subdirectories in dirpath (excluding '.' and '..'). 
filenames is a list of the names of the non-directory files in dirpath. Note that the names in the lists are just names, with no path components. To get a full path (which begins with top) to a file or directory in dirpath, do os.path.join(dirpath, name). If optional arg 'topdown' is true or not specified, the triple for a directory is generated before the triples for any of its subdirectories (directories are generated top down). If topdown is false, the triple for a directory is generated after the triples for all of its subdirectories (directories are generated bottom up). When topdown is true, the caller can modify the dirnames list in-place (e.g., via del or slice assignment), and walk will only recurse into the subdirectories whose names remain in dirnames; this can be used to prune the search, or to impose a specific order of visiting. Modifying dirnames when topdown is false is ineffective, since the directories in dirnames have already been generated by the time dirnames itself is generated. No matter the value of topdown, the list of subdirectories is retrieved before the tuples for the directory and its subdirectories are generated. By default errors from the os.scandir() call are ignored. If optional arg 'onerror' is specified, it should be a function; it will be called with one argument, an OSError instance. It can report the error to continue with the walk, or raise the exception to abort the walk. Note that the filename is available as the filename attribute of the exception object. By default, os.walk does not follow symbolic links to subdirectories on systems that support them. In order to get this functionality, set the optional argument 'followlinks' to true. Caution: if you pass a relative pathname for top, don't change the current working directory between resumptions of walk. walk never changes the current directory, and assumes that the client doesn't either. Example: import os from os.path import join, getsize for root, dirs, files in os.walk('python/Lib/email'): print(root, "consumes", end="") print(sum([getsize(join(root, name)) for name in files]), end="") print("bytes in", len(files), "non-directory files") if 'CVS' in dirs: dirs.remove('CVS') # don't visit CVS directories """ dirs = [] nondirs = [] # We may not have read permission for top, in which case we can't # get a list of the files the directory contains. os.walk # always suppressed the exception then, rather than blow up for a # minor reason when (say) a thousand readable directories are still # left to visit. That logic is copied here. try: # Note that scandir is global in this module due # to earlier import-*. scandir_it = scandir(top) except OSError as error: if onerror is not None: onerror(error) return while True: try: try: entry = next(scandir_it) except StopIteration: break except OSError as error: if onerror is not None: onerror(error) return try: is_dir = entry.is_dir() except OSError: # If is_dir() raises an OSError, consider that the entry is not # a directory, same behaviour than os.path.isdir(). is_dir = False if is_dir: dirs.append(entry.name) else: nondirs.append(entry.name) if not topdown and is_dir: # Bottom-up: recurse into sub-directory, but exclude symlinks to # directories if followlinks is False if followlinks: walk_into = True else: try: is_symlink = entry.is_symlink() except OSError: # If is_symlink() raises an OSError, consider that the # entry is not a symbolic link, same behaviour than # os.path.islink(). 
is_symlink = False walk_into = not is_symlink if walk_into: yield from walk(entry.path, topdown, onerror, followlinks) # Yield before recursion if going top down if topdown: yield top, dirs, nondirs # Recurse into sub-directories islink, join = path.islink, path.join for name in dirs: new_path = join(top, name) # Issue #23605: os.path.islink() is used instead of caching # entry.is_symlink() result during the loop on os.scandir() because # the caller can replace the directory entry during the "yield" # above. if followlinks or not islink(new_path): yield from walk(new_path, topdown, onerror, followlinks) else: # Yield after recursion if going bottom up yield top, dirs, nondirs __all__.append("walk") if {open, stat} <= supports_dir_fd and {listdir, stat} <= supports_fd: def fwalk(top=".", topdown=True, onerror=None, *, follow_symlinks=False, dir_fd=None): """Directory tree generator. This behaves exactly like walk(), except that it yields a 4-tuple dirpath, dirnames, filenames, dirfd `dirpath`, `dirnames` and `filenames` are identical to walk() output, and `dirfd` is a file descriptor referring to the directory `dirpath`. The advantage of fwalk() over walk() is that it's safe against symlink races (when follow_symlinks is False). If dir_fd is not None, it should be a file descriptor open to a directory, and top should be relative; top will then be relative to that directory. (dir_fd is always supported for fwalk.) Caution: Since fwalk() yields file descriptors, those are only valid until the next iteration step, so you should dup() them if you want to keep them for a longer period. Example: import os for root, dirs, files, rootfd in os.fwalk('python/Lib/email'): print(root, "consumes", end="") print(sum([os.stat(name, dir_fd=rootfd).st_size for name in files]), end="") print("bytes in", len(files), "non-directory files") if 'CVS' in dirs: dirs.remove('CVS') # don't visit CVS directories """ # Note: To guard against symlink races, we use the standard # lstat()/open()/fstat() trick. orig_st = stat(top, follow_symlinks=False, dir_fd=dir_fd) topfd = open(top, O_RDONLY, dir_fd=dir_fd) try: if (follow_symlinks or (st.S_ISDIR(orig_st.st_mode) and path.samestat(orig_st, stat(topfd)))): yield from _fwalk(topfd, top, topdown, onerror, follow_symlinks) finally: close(topfd) def _fwalk(topfd, toppath, topdown, onerror, follow_symlinks): # Note: This uses O(depth of the directory tree) file descriptors: if # necessary, it can be adapted to only require O(1) FDs, see issue # #13734. names = listdir(topfd) dirs, nondirs = [], [] for name in names: try: # Here, we don't use AT_SYMLINK_NOFOLLOW to be consistent with # walk() which reports symlinks to directories as directories. # We do however check for symlinks before recursing into # a subdirectory. 
if st.S_ISDIR(stat(name, dir_fd=topfd).st_mode): dirs.append(name) else: nondirs.append(name) except FileNotFoundError: try: # Add dangling symlinks, ignore disappeared files if st.S_ISLNK(stat(name, dir_fd=topfd, follow_symlinks=False) .st_mode): nondirs.append(name) except FileNotFoundError: continue if topdown: yield toppath, dirs, nondirs, topfd for name in dirs: try: orig_st = stat(name, dir_fd=topfd, follow_symlinks=follow_symlinks) dirfd = open(name, O_RDONLY, dir_fd=topfd) except OSError as err: if onerror is not None: onerror(err) return try: if follow_symlinks or path.samestat(orig_st, stat(dirfd)): dirpath = path.join(toppath, name) yield from _fwalk(dirfd, dirpath, topdown, onerror, follow_symlinks) finally: close(dirfd) if not topdown: yield toppath, dirs, nondirs, topfd __all__.append("fwalk") # Make sure os.environ exists, at least try: environ except NameError: environ = {} if _exists('execv') and _exists('execve'): def execl(file, *args): """execl(file, *args) Execute the executable file with argument list args, replacing the current process. """ execv(file, args) def execle(file, *args): """execle(file, *args, env) Execute the executable file with argument list args and environment env, replacing the current process. """ env = args[-1] execve(file, args[:-1], env) def execlp(file, *args): """execlp(file, *args) Execute the executable file (which is searched for along $PATH) with argument list args, replacing the current process. """ execvp(file, args) def execlpe(file, *args): """execlpe(file, *args, env) Execute the executable file (which is searched for along $PATH) with argument list args and environment env, replacing the current process. """ env = args[-1] execvpe(file, args[:-1], env) def execvp(file, args): """execvp(file, args) Execute the executable file (which is searched for along $PATH) with argument list args, replacing the current process. args may be a list or tuple of strings. """ _execvpe(file, args) def execvpe(file, args, env): """execvpe(file, args, env) Execute the executable file (which is searched for along $PATH) with argument list args and environment env , replacing the current process. args may be a list or tuple of strings. """ _execvpe(file, args, env) __all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"]) def _execvpe(file, args, env=None): if env is not None: exec_func = execve argrest = (args, env) else: exec_func = execv argrest = (args,) env = environ head, tail = path.split(file) if head: exec_func(file, *argrest) return last_exc = saved_exc = None saved_tb = None path_list = get_exec_path(env) if name != 'nt': file = fsencode(file) path_list = map(fsencode, path_list) for dir in path_list: fullname = path.join(dir, file) try: exec_func(fullname, *argrest) except OSError as e: last_exc = e tb = sys.exc_info()[2] if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR and saved_exc is None): saved_exc = e saved_tb = tb if saved_exc: raise saved_exc.with_traceback(saved_tb) raise last_exc.with_traceback(tb) def get_exec_path(env=None): """Returns the sequence of directories that will be searched for the named executable (similar to a shell) when launching a process. *env* must be an environment variable dict or None. If *env* is None, os.environ will be used. """ # Use a local import instead of a global import to limit the number of # modules loaded at startup: the os module is always loaded at startup by # Python. It may also avoid a bootstrap issue. 
import warnings if env is None: env = environ # {b'PATH': ...}.get('PATH') and {'PATH': ...}.get(b'PATH') emit a # BytesWarning when using python -b or python -bb: ignore the warning with warnings.catch_warnings(): warnings.simplefilter("ignore", BytesWarning) try: path_list = env.get('PATH') except TypeError: path_list = None if supports_bytes_environ: try: path_listb = env[b'PATH'] except (KeyError, TypeError): pass else: if path_list is not None: raise ValueError( "env cannot contain 'PATH' and b'PATH' keys") path_list = path_listb if path_list is not None and isinstance(path_list, bytes): path_list = fsdecode(path_list) if path_list is None: path_list = defpath return path_list.split(pathsep) # Change environ to automatically call putenv(), unsetenv if they exist. from _collections_abc import MutableMapping class _Environ(MutableMapping): def __init__(self, data, encodekey, decodekey, encodevalue, decodevalue, putenv, unsetenv): self.encodekey = encodekey self.decodekey = decodekey self.encodevalue = encodevalue self.decodevalue = decodevalue self.putenv = putenv self.unsetenv = unsetenv self._data = data def __getitem__(self, key): try: value = self._data[self.encodekey(key)] except KeyError: # raise KeyError with the original key value raise KeyError(key) from None return self.decodevalue(value) def __setitem__(self, key, value): key = self.encodekey(key) value = self.encodevalue(value) self.putenv(key, value) self._data[key] = value def __delitem__(self, key): encodedkey = self.encodekey(key) self.unsetenv(encodedkey) try: del self._data[encodedkey] except KeyError: # raise KeyError with the original key value raise KeyError(key) from None def __iter__(self): for key in self._data: yield self.decodekey(key) def __len__(self): return len(self._data) def __repr__(self): return 'environ({{{}}})'.format(', '.join( ('{!r}: {!r}'.format(self.decodekey(key), self.decodevalue(value)) for key, value in self._data.items()))) def copy(self): return dict(self) def setdefault(self, key, value): if key not in self: self[key] = value return self[key] try: _putenv = putenv except NameError: _putenv = lambda key, value: None else: if "putenv" not in __all__: __all__.append("putenv") try: _unsetenv = unsetenv except NameError: _unsetenv = lambda key: _putenv(key, "") else: if "unsetenv" not in __all__: __all__.append("unsetenv") def _createenviron(): if name == 'nt' or name == 'uwp_os': # Where Env Var Names Must Be UPPERCASE def check_str(value): if not isinstance(value, str): raise TypeError("str expected, not %s" % type(value).__name__) return value encode = check_str decode = str def encodekey(key): return encode(key).upper() data = {} for key, value in environ.items(): data[encodekey(key)] = value else: # Where Env Var Names Can Be Mixed Case encoding = sys.getfilesystemencoding() def encode(value): if not isinstance(value, str): raise TypeError("str expected, not %s" % type(value).__name__) return value.encode(encoding, 'surrogateescape') def decode(value): return value.decode(encoding, 'surrogateescape') encodekey = encode data = environ return _Environ(data, encodekey, decode, encode, decode, _putenv, _unsetenv) # unicode environ environ = _createenviron() del _createenviron def getenv(key, default=None): """Get an environment variable, return None if it doesn't exist. The optional second argument can specify an alternate default. 
key, default and the result are str.""" return environ.get(key, default) supports_bytes_environ = (name != 'nt' and name != 'uwp_os') __all__.extend(("getenv", "supports_bytes_environ")) if supports_bytes_environ: def _check_bytes(value): if not isinstance(value, bytes): raise TypeError("bytes expected, not %s" % type(value).__name__) return value # bytes environ environb = _Environ(environ._data, _check_bytes, bytes, _check_bytes, bytes, _putenv, _unsetenv) del _check_bytes def getenvb(key, default=None): """Get an environment variable, return None if it doesn't exist. The optional second argument can specify an alternate default. key, default and the result are bytes.""" return environb.get(key, default) __all__.extend(("environb", "getenvb")) def _fscodec(): encoding = sys.getfilesystemencoding() if encoding == 'mbcs': errors = 'strict' else: errors = 'surrogateescape' def fsencode(filename): """ Encode filename to the filesystem encoding with 'surrogateescape' error handler, return bytes unchanged. On Windows, use 'strict' error handler if the file system encoding is 'mbcs' (which is the default encoding). """ if isinstance(filename, bytes): return filename elif isinstance(filename, str): return filename.encode(encoding, errors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) def fsdecode(filename): """ Decode filename from the filesystem encoding with 'surrogateescape' error handler, return str unchanged. On Windows, use 'strict' error handler if the file system encoding is 'mbcs' (which is the default encoding). """ if isinstance(filename, str): return filename elif isinstance(filename, bytes): return filename.decode(encoding, errors) else: raise TypeError("expect bytes or str, not %s" % type(filename).__name__) return fsencode, fsdecode fsencode, fsdecode = _fscodec() del _fscodec # Supply spawn*() (probably only for Unix) if _exists("fork") and not _exists("spawnv") and _exists("execv"): P_WAIT = 0 P_NOWAIT = P_NOWAITO = 1 __all__.extend(["P_WAIT", "P_NOWAIT", "P_NOWAITO"]) # XXX Should we support P_DETACH? I suppose it could fork()**2 # and close the std I/O streams. Also, P_OVERLAY is the same # as execv*()? def _spawnvef(mode, file, args, env, func): # Internal helper; func is the exec*() function to use pid = fork() if not pid: # Child try: if env is None: func(file, args) else: func(file, args, env) except: _exit(127) else: # Parent if mode == P_NOWAIT: return pid # Caller is responsible for waiting! while 1: wpid, sts = waitpid(pid, 0) if WIFSTOPPED(sts): continue elif WIFSIGNALED(sts): return -WTERMSIG(sts) elif WIFEXITED(sts): return WEXITSTATUS(sts) else: raise OSError("Not stopped, signaled or exited???") def spawnv(mode, file, args): """spawnv(mode, file, args) -> integer Execute file with arguments from args in a subprocess. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return _spawnvef(mode, file, args, None, execv) def spawnve(mode, file, args, env): """spawnve(mode, file, args, env) -> integer Execute file with arguments from args in a subprocess with the specified environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. 
""" return _spawnvef(mode, file, args, env, execve) # Note: spawnvp[e] is't currently supported on Windows def spawnvp(mode, file, args): """spawnvp(mode, file, args) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return _spawnvef(mode, file, args, None, execvp) def spawnvpe(mode, file, args, env): """spawnvpe(mode, file, args, env) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return _spawnvef(mode, file, args, env, execvpe) __all__.extend(["spawnv", "spawnve", "spawnvp", "spawnvpe"]) if _exists("spawnv"): # These aren't supplied by the basic Windows code # but can be easily implemented in Python def spawnl(mode, file, *args): """spawnl(mode, file, *args) -> integer Execute file with arguments from args in a subprocess. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return spawnv(mode, file, args) def spawnle(mode, file, *args): """spawnle(mode, file, *args, env) -> integer Execute file with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ env = args[-1] return spawnve(mode, file, args[:-1], env) __all__.extend(["spawnl", "spawnle"]) if _exists("spawnvp"): # At the moment, Windows doesn't implement spawnvp[e], # so it won't have spawnlp[e] either. def spawnlp(mode, file, *args): """spawnlp(mode, file, *args) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. """ return spawnvp(mode, file, args) def spawnlpe(mode, file, *args): """spawnlpe(mode, file, *args, env) -> integer Execute file (which is looked for along $PATH) with arguments from args in a subprocess with the supplied environment. If mode == P_NOWAIT return the pid of the process. If mode == P_WAIT return the process's exit code if it exits normally; otherwise return -SIG, where SIG is the signal that killed it. 
""" env = args[-1] return spawnvpe(mode, file, args[:-1], env) __all__.extend(["spawnlp", "spawnlpe"]) if name != 'uwp_os': # Supply os.popen() def popen(cmd, mode="r", buffering=-1): if not isinstance(cmd, str): raise TypeError("invalid cmd type (%s, expected string)" % type(cmd)) if mode not in ("r", "w"): raise ValueError("invalid mode %r" % mode) if buffering == 0 or buffering is None: raise ValueError("popen() does not support unbuffered streams") import subprocess, io if mode == "r": proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, bufsize=buffering) return _wrap_close(io.TextIOWrapper(proc.stdout), proc) else: proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, bufsize=buffering) return _wrap_close(io.TextIOWrapper(proc.stdin), proc) __all__.extend(["popen"]) # Helper for popen() -- a proxy for a file whose close waits for the process class _wrap_close: def __init__(self, stream, proc): self._stream = stream self._proc = proc def close(self): self._stream.close() returncode = self._proc.wait() if returncode == 0: return None if name == 'nt': return returncode else: return returncode << 8 # Shift left to match old behavior def __enter__(self): return self def __exit__(self, *args): self.close() def __getattr__(self, name): return getattr(self._stream, name) def __iter__(self): return iter(self._stream) # Supply os.fdopen() def fdopen(fd, *args, **kwargs): if not isinstance(fd, int): raise TypeError("invalid fd type (%s, expected integer)" % type(fd)) import io return io.open(fd, *args, **kwargs)
ms-iot/python
cpython/Lib/os.py
Python
bsd-3-clause
36313
[ "VisIt" ]
ffc581ca4370d475264c67d88825f99b422ce838123a3c92811a7405aa4dacc5
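The os module above publishes capability sets (supports_dir_fd, supports_fd, supports_follow_symlinks) assembled from the platform's _have_functions list. A short hedged sketch of how calling code is meant to probe those sets before passing the corresponding keyword arguments; the file name and directory descriptor below are placeholders.

# Probe the capability sets assembled above before using the optional keywords.
import os


def unlink_in_dir(name, dir_fd):
    # dir_fd is only honoured where the platform provides the *at() variants;
    # membership of the function object in os.supports_dir_fd is the supported check.
    if os.unlink in os.supports_dir_fd:
        os.unlink(name, dir_fd=dir_fd)
    else:
        raise NotImplementedError("dir_fd is not supported on this platform")


if os.stat in os.supports_follow_symlinks:
    info = os.stat("some_symlink", follow_symlinks=False)  # placeholder path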
#!/usr/local/bin/python # -*- coding: utf-8 -*- from addict import Dict # from data.pokemon_data import pokemon from lemonyellow.core.helpers import namedtuple_with_defaults # Effects: GetBike = "Get Bike" RideBike = "Where do you want to go?" BoostMaxPP20Percent = "What it says, m'boy." LevelUp = "LEVEL UP" UnlockSilph = "UnlockSilph" UnlockBlaineGym = "UnlockBlaineGym" StoreCoins = "storecoins" TeleportOut = "TeleportOut" SafariZoneWarden = "Warden gets his teeth back!" SplitXP = "SplitXP" GuardSpec = "GuardSpec" ItemFinder = "Itemfinder" EnableLift = "True/False" OaksParcel = "Man, I never liked that mission" EscapeWildBattle = "What it says." BoardSSAnne = "Set boardpass to True" SeeGhost = "Set seeghost to True" ShowMap = "ShowMap" poke_Na = "N/A!" poke_able = "Able!" ShareXP = "ShareXP" def is_valid(item): """ Verify that an item object or name is valid. """ # if we got passed an item object if item in list_of_items: return True for doohickey in list_of_items: if doohickey.name.lower() == str(item).lower(): return True return False def convert_to_object(itemstr): """ Takes a string of an item name and returns the appropriate object. """ for thing in list_of_items: if thing.name.lower() == itemstr.lower(): return thing def CureStatus(status_type, slot_num=0): """takes Burn, Poison, Sleep, Burn, Freeze, Paralyze, All""" pass def GetPokemon(name): """takes any pokemon name as specified in the pokemon_data sheet""" pass def RestorePP(amt, restore_type): # Amt + Single move / All moves pass def RestoreHP(amt): pass def RestoreMaxPP(singleAll): pass def Repel(stepnum): pass def Revive(revive_type): """takes 'Full' or 'Half""" pass def BoostStat(statname): """accepts Special, HP, Defense, Attack, CriticalHit""" pass def Pokeball(ball_type): """accepts "Poke", "Great", "Ultra", "Master", "Safari". 
Also, you need to find sales information on the ultra ball.""" pass def EvoStone(stone_type): """accepts Leaf, Moon, Fire, Thunder, Water def EvoAble(pslot,itemslot,stone_t): if pslot.pokebase.lvl_evolve == stone_t: try: itemslot = poke_able except IndexError: pass if stone_type == 'Leaf': EvoAble(Player.slot1, itemdisplay.slot1, 'Leaf_Stone') EvoAble(Player.slot2, itemdisplay.slot2, 'Leaf_Stone') EvoAble(Player.slot3, itemdisplay.slot3, 'Leaf_Stone') EvoAble(Player.slot4, itemdisplay.slot4, 'Leaf_Stone') EvoAble(Player.slot5, itemdisplay.slot5, 'Leaf_Stone') EvoAble(Player.slot6, itemdisplay.slot6, 'Leaf_Stone') if stone_type == 'Moon': EvoAble(Player.slot1, itemdisplay.slot1, 'Moon_Stone') EvoAble(Player.slot2, itemdisplay.slot2, 'Moon_Stone') EvoAble(Player.slot3, itemdisplay.slot3, 'Moon_Stone') EvoAble(Player.slot4, itemdisplay.slot4, 'Moon_Stone') EvoAble(Player.slot5, itemdisplay.slot5, 'Moon_Stone') EvoAble(Player.slot6, itemdisplay.slot6, 'Moon_Stone') if stone_type == 'Fire': EvoAble(Player.slot1, itemdisplay.slot1, 'Fire_Stone') EvoAble(Player.slot2, itemdisplay.slot2, 'Fire_Stone') EvoAble(Player.slot3, itemdisplay.slot3, 'Fire_Stone') EvoAble(Player.slot4, itemdisplay.slot4, 'Fire_Stone') EvoAble(Player.slot5, itemdisplay.slot5, 'Fire_Stone') EvoAble(Player.slot6, itemdisplay.slot6, 'Fire_Stone') if stone_type == 'Thunder': EvoAble(Player.slot1, itemdisplay.slot1, 'Thunder_Stone') EvoAble(Player.slot2, itemdisplay.slot2, 'Thunder_Stone') EvoAble(Player.slot3, itemdisplay.slot3, 'Thunder_Stone') EvoAble(Player.slot4, itemdisplay.slot4, 'Thunder_Stone') EvoAble(Player.slot5, itemdisplay.slot5, 'Thunder_Stone') EvoAble(Player.slot6, itemdisplay.slot6, 'Thunder_Stone') if stone_type == 'Water': EvoAble(Player.slot1, itemdisplay.slot1, 'Water_Stone') EvoAble(Player.slot2, itemdisplay.slot2, 'Water_Stone') EvoAble(Player.slot3, itemdisplay.slot3, 'Water_Stone') EvoAble(Player.slot4, itemdisplay.slot4, 'Water_Stone') EvoAble(Player.slot5, itemdisplay.slot5, 'Water_Stone') EvoAble(Player.slot6, itemdisplay.slot6, 'Water_Stone')""" def FishRod(fishrod_type): """accepts Old, Good, or Super""" pass def FullRestore(): CureStatus("All") RestoreHP(9001) def BattleStatus(statname): """Affects Accuracy, Attack, Defense, SpAtk, or Speed for one battle.""" pass def ItemDisplayClear(): itemdisplay.slot1 = "N/A!" itemdisplay.slot2 = "N/A!" itemdisplay.slot3 = "N/A!" itemdisplay.slot4 = "N/A!" itemdisplay.slot5 = "N/A!" itemdisplay.slot6 = "N/A!" class itemdisplay: # what did I make this for? It seems like such a dumb idea now. slot1 = "N/A!" slot2 = "N/A!" slot3 = "N/A!" slot4 = "N/A!" slot5 = "N/A!" slot6 = "N/A!" # ******************************************* # DO NOT TOUCH ME _item_fields = "name cost sell affect description" _item_defaults = ["Unknown", None, None, None, "DEBUG"] itembase = namedtuple_with_defaults("item", _item_fields, _item_defaults) class item(itembase): # Yay for subclassing! 
def __new__(cls, **kwargs): self = super(item, cls).__new__(cls, **kwargs) return self def is_key_item(self): return True if self in key_items else False # ******************************************* antidote = item( name="Antidote", cost=100, sell=50, affect=CureStatus("Poison"), ) awakening = item( name="Awakening", cost=250, sell=125, affect=CureStatus("Sleep"), ) bicycle = item( name="Bicycle", affect=RideBike, ) bike_voucher = item( name="Bike Voucher", affect=GetBike, ) burn_heal = item( name="Burn Heal", cost=250, sell=125, affect=CureStatus("Burn"), ) calcium = item( name="Calcium", cost=9800, sell=4900, affect=BoostStat("SpAtk"), ) carbos = item( name="Carbos", cost=9800, sell=4900, affect=BoostStat("Speed"), ) card_key = item( name="Card key", affect=UnlockSilph, ) coin_case = item( name="Coin Case", affect=StoreCoins, ) dire_hit = item( name="Dire Hit", cost=650, sell=375, affect=BoostStat("CriticalHit"), ) dome_fossil = item( name="Dome Fossil", affect=GetPokemon("kabuto"), ) elixer = item( name="Elixer", affect=RestorePP(10, "single"), ) escape_rope = item( name="Escape Rope", cost=550, sell=275, affect=TeleportOut, ) ether = item( name="Ether", affect=RestorePP(10, "single"), ) exp_share = item( # Note: In the original games, the EXP Share was called the EXP All and had # slightly different mechanics to what the EXP Share does. Because of # Nintendo's rebranding of the EXP All to the EXP Share in all subsequent # games, I am including the item as the EXP Share with its included logic. name="EXP Share", affect=ShareXP, ) fire_stone = item( name="Fire Stone", cost=2100, sell=1050, affect=EvoStone("Fire"), ) fresh_water = item( name="Fresh Water", cost=200, sell=100, affect=RestoreHP(50), ) full_heal = item( name="Full Heal", cost=600, sell=300, affect=CureStatus("All"), ) full_restore = item( name="Full Restore", cost=3000, sell=1500, affect=FullRestore, ) gold_teeth = item( name="Gold Teeth", affect=SafariZoneWarden, ) good_rod = item( name="Good Rod", affect=FishRod("Good"), ) great_ball = item( name="Great Ball", cost=600, sell=300, affect=Pokeball("Great"), ) guard_spec = item( name="Guard Spec", cost=700, sell=350, affect=GuardSpec, ) helix_fossil = item( name="Helix Fossil", affect=GetPokemon("omanyte"), ) hp_up = item( name="HP Up", cost=9800, sell=4900, affect=BoostStat("HP"), ) hyper_potion = item( name="Hyper Potion", cost=1500, sell=750, affect=RestoreHP(200), ) ice_heal = item( name="Ice Heal", cost=250, sell=125, affect=CureStatus("Freeze"), ) iron = item( name="Iron", cost=9800, sell=4900, affect=BoostStat("Defense"), ) item_finder = item( name="Item Finder", affect=ItemFinder, ) leaf_stone = item( name="Leaf Stone", cost=2100, sell=1050, affect=EvoStone("Leaf"), ) lemonade = item( name="Lemonade", cost=350, sell=175, affect=RestoreHP(80), ) lift_key = item( name="Lift Key", affect=EnableLift, ) master_ball = item( name="Master Ball", affect=Pokeball("Master"), ) max_elixir = item( name="Max Elixir", affect=RestorePP("max", "all"), ) max_ether = item( name="Max Ether", affect=RestorePP("max", "single"), ) max_potion = item( name="Max Potion", cost=2500, sell=1250, affect=RestoreHP("max"), ) max_repel = item( name="Max Repel", cost=700, sell=350, affect=Repel(250), ) max_revive = item( name="Max Revive", sell=2000, affect=Revive("Full"), ) moon_stone = item( name="Moon Stone", affect=EvoStone("Moon"), ) nugget = item( name="Nugget", sell=5000, ) oaks_parcel = item( name="Oak's Parcel", affect=OaksParcel, ) old_amber = item( name="Old Amber", 
affect=GetPokemon("aerodactyl"), ) old_rod = item( name="Old Rod", affect=FishRod("Old"), ) paralyze_heal = item( name="Paralyze Heal", cost=200, sell=100, affect=CureStatus("Paralyze"), ) poke_ball = item( name="Poke Ball", cost=200, sell=100, affect=Pokeball("Poke"), ) poke_doll = item( name="Poke Doll", cost=1000, sell=500, affect=EscapeWildBattle, ) poke_flute = item( name="Poke Flute", affect=CureStatus("Sleep"), ) potion = item( name="Potion", cost=300, sell=150, affect=RestoreHP(20), ) pp_up = item( name="PP Up", affect=BoostMaxPP20Percent, ) rare_candy = item( name="Rare Candy", sell=2400, affect=LevelUp, ) protein = item( name="Protein", cost=9800, sell=4900, affect=BoostStat("Attack"), ) repel = item( name="Repel", cost=350, sell=175, affect=Repel(100), ) revive = item( name="Revive", cost=1500, sell=750, affect=Revive("Half"), ) s_s_ticket = item( name="S.S. Ticket", affect=BoardSSAnne, ) safari_ball = item( name="Safari Ball", affect=Pokeball("Safari"), ) secret_key = item( name="Secret Key", affect=UnlockBlaineGym, ) silph_scope = item( name="Silph Scope", affect=SeeGhost, ) soda_pop = item( name="Soda Pop", cost=300, sell=150, affect=RestoreHP(60), ) super_potion = item( name="Super Potion", cost=700, sell=350, affect=RestoreHP(70), ) super_repel = item( name="Super Repel", cost=500, sell=250, affect=Repel(200), ) super_rod = item( name="Super Rod", affect=FishRod("Super"), ) thunder_stone = item( name="Thunder Stone", cost=2100, sell=1050, affect=EvoStone("Thunder"), ) town_map = item( name="Town Map", affect=ShowMap, ) ultra_ball = item( name="Ultra Ball", cost=1200, sell=600, affect=Pokeball("Ultra"), ) water_stone = item( name="Water Stone", cost=2100, sell=1050, affect=EvoStone("Water"), ) x_accuracy = item( name="X Accuracy", cost=950, sell=475, affect=BattleStatus("Accuracy"), ) x_attack = item( name="X Attack", cost=500, sell=250, affect=BattleStatus("Attack"), ) x_defend = item( name="X Defend", cost=550, sell=275, affect=BattleStatus("Defense"), ) x_special_attack = item( name="X Sp. Atk", cost=350, sell=175, affect=BattleStatus("SpAtk"), ) x_special_defend = item( name="X Sp. 
Def", cost=350, sell=175, affect=BattleStatus("SpDef"), ) x_speed = item( name="X Speed", cost=350, sell=175, affect=BattleStatus("Speed"), ) zinc = item( name="Zinc", cost=9800, sell=4900, affect=BoostStat("SpDef"), ) items = Dict() items.antidote = antidote items.awakening = awakening items.bicycle = bicycle items.bike_voucher = bike_voucher items.burn_heal = burn_heal items.calcium = calcium items.carbos = carbos items.card_key = card_key items.coin_case = coin_case items.dire_hit = dire_hit items.dome_fossil = dome_fossil items.elixer = elixer items.escape_rope = escape_rope items.ether = ether items.exp_share = exp_share items.fire_stone = fire_stone items.fresh_water = fresh_water items.full_heal = full_heal items.full_restore = full_restore items.gold_teeth = gold_teeth items.good_rod = good_rod items.great_ball = great_ball items.guard_spec = guard_spec items.helix_fossil = helix_fossil items.hp_up = hp_up items.hyper_potion = hyper_potion items.ice_heal = ice_heal items.iron = iron items.item_finder = item_finder items.leaf_stone = leaf_stone items.lemonade = lemonade items.lift_key = lift_key items.master_ball = master_ball items.max_elixir = max_elixir items.max_ether = max_ether items.max_potion = max_potion items.max_repel = max_repel items.max_revive = max_revive items.moon_stone = moon_stone items.nugget = nugget items.oaks_parcel = oaks_parcel items.old_amber = old_amber items.old_rod = old_rod items.paralyze_heal = paralyze_heal items.poke_ball = poke_ball items.poke_doll = poke_doll items.poke_flute = poke_flute items.potion = potion items.pp_up = pp_up items.rare_candy = rare_candy items.protein = protein items.repel = repel items.revive = revive items.s_s_ticket = s_s_ticket items.safari_ball = safari_ball items.secret_key = secret_key items.silph_scope = silph_scope items.soda_pop = soda_pop items.super_potion = super_potion items.super_repel = super_repel items.super_rod = super_rod items.thunder_stone = thunder_stone items.town_map = town_map items.ultra_ball = ultra_ball items.water_stone = water_stone items.x_accuracy = x_accuracy items.x_attack = x_attack items.x_defend = x_defend items.x_special_attack = x_special_attack items.x_special_defend = x_special_defend items.x_speed = x_speed items.zinc = zinc key_items = [ bicycle, bike_voucher, card_key, coin_case, dome_fossil, gold_teeth, good_rod, helix_fossil, item_finder, lift_key, oaks_parcel, old_amber, old_rod, poke_flute, secret_key, silph_scope, s_s_ticket, super_rod, town_map, ] list_of_items = [ antidote, awakening, bicycle, bike_voucher, burn_heal, calcium, carbos, card_key, coin_case, dire_hit, dome_fossil, elixer, escape_rope, ether, exp_share, fire_stone, fresh_water, full_heal, full_restore, gold_teeth, good_rod, great_ball, guard_spec, helix_fossil, hp_up, hyper_potion, ice_heal, iron, item_finder, leaf_stone, lemonade, lift_key, master_ball, max_elixir, max_ether, max_potion, max_repel, max_revive, moon_stone, nugget, oaks_parcel, old_amber, old_rod, paralyze_heal, poke_ball, poke_doll, poke_flute, potion, pp_up, rare_candy, protein, repel, revive, s_s_ticket, safari_ball, secret_key, silph_scope, soda_pop, super_potion, super_repel, super_rod, thunder_stone, town_map, ultra_ball, water_stone, x_accuracy, x_attack, x_defend, x_special_attack, x_special_defend, x_speed, zinc, ]
itsthejoker/Pokemon-Homage
lemonyellow/data/items.py
Python
mit
16605
[ "Amber" ]
f469df9d85e9c222dc2a2989674ca96d39a0dcc688b34d87817d1dcfb2f1763b
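The item records above are built on namedtuple_with_defaults, a helper imported from lemonyellow.core.helpers but not included in this file. One common way such a helper is written is shown below, purely as a hedged sketch; the project's actual implementation may differ.

# Hypothetical sketch of namedtuple_with_defaults; the project's real helper is not shown.
import collections


def namedtuple_with_defaults(typename, field_names, default_values=()):
    T = collections.namedtuple(typename, field_names)
    # Defaults apply to the rightmost fields; a full-length default list covers
    # every field, which is how _item_fields/_item_defaults above use it.
    T.__new__.__defaults__ = tuple(default_values)
    return T

# With defaults in place, fields such as description may be omitted:
# potion = item(name="Potion", cost=300, sell=150)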
from common.exceptions import TaskListError
import logging
log = logging.getLogger(__name__)


class TaskList(object):

    def __init__(self):
        self.tasks = set()
        self.tasks_completed = []

    def load(self, function, manifest, *args):
        getattr(manifest.modules['provider'], function)(self.tasks, manifest, *args)
        for plugin in manifest.modules['plugins']:
            fn = getattr(plugin, function, None)
            if callable(fn):
                fn(self.tasks, manifest, *args)

    def run(self, info={}, dry_run=False):
        task_list = self.create_list()
        log.debug('Tasklist:\n\t{list}'.format(list='\n\t'.join(map(repr, task_list))))
        for task in task_list:
            if hasattr(task, 'description'):
                log.info(task.description)
            else:
                log.info('Running {task}'.format(task=task))
            if not dry_run:
                task.run(info)
                self.tasks_completed.append(task)

    def create_list(self):
        from common.phases import order
        graph = {}
        for task in self.tasks:
            self.check_ordering(task)
            successors = set()
            successors.update(task.successors)
            successors.update(filter(lambda succ: task in succ.predecessors, self.tasks))
            succeeding_phases = order[order.index(task.phase) + 1:]
            successors.update(filter(lambda succ: succ.phase in succeeding_phases, self.tasks))
            graph[task] = filter(lambda succ: succ in self.tasks, successors)

        components = self.strongly_connected_components(graph)
        cycles_found = 0
        for component in components:
            if len(component) > 1:
                cycles_found += 1
                log.debug('Cycle: {list}\n'.format(list=', '.join(map(repr, component))))
        if cycles_found > 0:
            msg = ('{0} cycles were found in the tasklist, '
                   'consult the logfile for more information.'.format(cycles_found))
            raise TaskListError(msg)

        sorted_tasks = self.topological_sort(graph)
        return sorted_tasks

    def check_ordering(self, task):
        # A task that must run before another may not belong to a later phase.
        for successor in task.successors:
            if task.phase > successor.phase:
                msg = ("The task {task} is specified as running before {other}, "
                       "but its phase '{phase}' lies after the phase '{other_phase}'"
                       .format(task=task, other=successor,
                               phase=task.phase, other_phase=successor.phase))
                raise TaskListError(msg)
        # A task that must run after another may not belong to an earlier phase.
        for predecessor in task.predecessors:
            if task.phase < predecessor.phase:
                msg = ("The task {task} is specified as running after {other}, "
                       "but its phase '{phase}' lies before the phase '{other_phase}'"
                       .format(task=task, other=predecessor,
                               phase=task.phase, other_phase=predecessor.phase))
                raise TaskListError(msg)

    def strongly_connected_components(self, graph):
        # Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
        # Find the strongly connected components in a graph using Tarjan's algorithm.
        # graph should be a dictionary mapping node names to lists of successor nodes.
        result = []
        stack = []
        low = {}

        def visit(node):
            if node in low:
                return
            num = len(low)
            low[node] = num
            stack_pos = len(stack)
            stack.append(node)
            for successor in graph[node]:
                visit(successor)
                low[node] = min(low[node], low[successor])
            if num == low[node]:
                component = tuple(stack[stack_pos:])
                del stack[stack_pos:]
                result.append(component)
                for item in component:
                    low[item] = len(graph)

        for node in graph:
            visit(node)
        return result

    def topological_sort(self, graph):
        # Source: http://www.logarithmic.net/pfh-files/blog/01208083168/sort.py
        count = {}
        for node in graph:
            count[node] = 0
        for node in graph:
            for successor in graph[node]:
                count[successor] += 1

        ready = [node for node in graph if count[node] == 0]
        result = []
        while ready:
            node = ready.pop(-1)
            result.append(node)
            for successor in graph[node]:
                count[successor] -= 1
                if count[successor] == 0:
                    ready.append(successor)
        return result
brianspeir/Vanilla
vendor/bootstrap-vz/base/tasklist.py
Python
bsd-3-clause
3837
[ "VisIt" ]
dd1452b31a5691a60575603cc18dcbf699c68c54cee4e2536414655346fe4139
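The TaskList above orders tasks by combining Tarjan's strongly-connected-components pass (to report cycles) with a simple indegree-counting topological sort. A toy illustration follows, with plain strings standing in for task objects; real tasks also carry phases, successors and predecessors.

# Toy graphs exercising the sort and the cycle detection; strings stand in for tasks.
tl = TaskList()

acyclic = {'a': ['b'], 'b': ['c'], 'c': []}
print(tl.topological_sort(acyclic))            # ['a', 'b', 'c']

cyclic = {'a': ['b'], 'b': ['a']}
components = tl.strongly_connected_components(cyclic)
print([c for c in components if len(c) > 1])   # one component of size 2 -> reported as a cycle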
""" Data-driven tests for reads """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals import collections import ga4gh.backend as backend import ga4gh.datamodel as datamodel import ga4gh.datamodel.datasets as datasets import ga4gh.datamodel.reads as reads import ga4gh.datamodel.references as references import ga4gh.protocol as protocol import tests.datadriven as datadriven import tests.utils as utils import pysam def testReads(): testDataDir = "tests/data/dataset1/reads" for test in datadriven.makeTests( testDataDir, ReadGroupSetTest, '*.bam'): yield test class ReadGroupSetInfo(object): """ Container class for information about a read group set """ def __init__(self, samFile): self.numAlignedReads = samFile.mapped self.numUnalignedReads = samFile.unmapped class ReadGroupInfo(object): """ Container class for information about a read group """ def __init__(self, gaReadGroupSet, samFile, readGroupName): self.gaReadGroup = reads.AbstractReadGroup( gaReadGroupSet, readGroupName) self.id = self.gaReadGroup.getId() self.samFile = samFile self.mappedReads = collections.defaultdict(list) for read in self.samFile: tags = dict(read.tags) if 'RG' not in tags or tags['RG'] != readGroupName: continue if read.reference_id != -1: # mapped read referenceName = self.samFile.getrname(read.reference_id) self.mappedReads[referenceName].append(read) self.numAlignedReads = -1 self.numUnalignedReads = -1 self.programs = [] if 'PG' in self.samFile.header: self.programs = self.samFile.header['PG'] self.sampleId = None self.description = None self.predictedInsertSize = None self.instrumentModel = None self.sequencingCenter = None self.experimentDescription = None self.library = None self.platformUnit = None self.runTime = None if 'RG' in self.samFile.header: readGroupHeader = [ rgHeader for rgHeader in self.samFile.header['RG'] if rgHeader['ID'] == readGroupName][0] self.sampleId = readGroupHeader.get('SM', None) self.description = readGroupHeader.get('DS', None) if 'PI' in readGroupHeader: self.predictedInsertSize = int(readGroupHeader['PI']) self.instrumentModel = readGroupHeader.get('PL', None) self.sequencingCenter = readGroupHeader.get('CN', None) self.experimentDescription = readGroupHeader.get('DS', None) self.library = readGroupHeader.get('LB', None) self.platformUnit = readGroupHeader.get('PU', None) self.runTime = readGroupHeader.get('DT', None) class ReadGroupSetTest(datadriven.DataDrivenTest): """ Data driven test for read group sets """ def __init__(self, localId, dataPath): self._backend = backend.AbstractBackend() self._referenceSet = None self._dataset = datasets.AbstractDataset("ds") self._readGroupInfos = {} self._readGroupSetInfo = None self._samFile = pysam.AlignmentFile(dataPath) self._readReferences() super(ReadGroupSetTest, self).__init__(localId, dataPath) self._readAlignmentInfo() def _readReferences(self): # Read the reference information from the samfile referenceSetName = None for referenceInfo in self._samFile.header['SQ']: if 'AS' not in referenceInfo: infoDict = reads.parseMalformedBamHeader(referenceInfo) # If there's still no reference set name in there we use # a default name. 
name = infoDict.get("AS", "Default") if referenceSetName is None: referenceSetName = name self._addReferenceSet(referenceSetName) else: self.assertEqual(referenceSetName, name) self._addReference(infoDict['SN']) def _addReferenceSet(self, referenceSetName): self._referenceSet = references.AbstractReferenceSet(referenceSetName) self._backend.addReferenceSet(self._referenceSet) def _addReference(self, referenceName): reference = references.AbstractReference( self._referenceSet, referenceName) self._referenceSet.addReference(reference) def _readAlignmentInfo(self): self._readGroupSetInfo = ReadGroupSetInfo(self._samFile) if 'RG' in self._samFile.header: readGroupHeaders = self._samFile.header['RG'] readGroupNames = [ readGroupHeader['ID'] for readGroupHeader in readGroupHeaders] else: readGroupNames = ['default'] for readGroupName in readGroupNames: readGroupInfo = ReadGroupInfo( self._gaObject, self._samFile, readGroupName) self._readGroupInfos[readGroupName] = readGroupInfo def getDataModelInstance(self, localId, dataPath): return reads.HtslibReadGroupSet( self._dataset, localId, dataPath, self._backend) def getProtocolClass(self): return protocol.ReadGroupSet def testSampleIdEtc(self): # test that sampleId and other misc fields are set correctly readGroupSet = self._gaObject for readGroup in readGroupSet.getReadGroups(): readGroupInfo = self._readGroupInfos[readGroup.getLocalId()] gaReadGroup = readGroup.toProtocolElement() self.assertEqual( readGroupInfo.sampleId, gaReadGroup.sampleId) self.assertEqual( readGroupInfo.predictedInsertSize, gaReadGroup.predictedInsertSize) self.assertEqual( readGroupInfo.description, gaReadGroup.description) def testExperiments(self): # test that the experiment field is set correctly readGroupSet = self._gaObject for readGroup in readGroupSet.getReadGroups(): readGroupInfo = self._readGroupInfos[readGroup.getLocalId()] gaReadGroup = readGroup.toProtocolElement() self.assertIn( "experiment", datamodel.CompoundId.deobfuscate(gaReadGroup.experiment.id)) self.assertEqual( readGroupInfo.instrumentModel, gaReadGroup.experiment.instrumentModel) self.assertEqual( readGroupInfo.sequencingCenter, gaReadGroup.experiment.sequencingCenter) self.assertEqual( readGroupInfo.experimentDescription, gaReadGroup.experiment.description) self.assertEqual( readGroupInfo.library, gaReadGroup.experiment.library) self.assertEqual( readGroupInfo.platformUnit, gaReadGroup.experiment.platformUnit) self.assertEqual( readGroupInfo.runTime, gaReadGroup.experiment.runTime) def testPrograms(self): # test that program info is set correctly readGroupSet = self._gaObject for readGroup in readGroupSet.getReadGroups(): readGroupInfo = self._readGroupInfos[readGroup.getLocalId()] gaPrograms = readGroup.getPrograms() htslibPrograms = readGroupInfo.programs for gaProgram, htslibProgram in utils.zipLists( gaPrograms, htslibPrograms): self.assertEqual( gaProgram.id, htslibProgram.get('ID')) self.assertEqual( gaProgram.commandLine, htslibProgram.get('CL', None)) self.assertEqual( gaProgram.name, htslibProgram.get('PN', None)) self.assertEqual( gaProgram.prevProgramId, htslibProgram.get('PP', None)) self.assertEqual( gaProgram.version, htslibProgram.get('VN', None)) def testReadGroupStats(self): # test that the stats attrs are populated correctly readGroupSet = self._gaObject gaReadGroupSet = readGroupSet.toProtocolElement() readGroupSetInfo = self._readGroupSetInfo self.assertEqual( readGroupSet.getNumAlignedReads(), readGroupSetInfo.numAlignedReads) self.assertEqual( 
readGroupSet.getNumUnalignedReads(), readGroupSetInfo.numUnalignedReads) self.assertEqual( gaReadGroupSet.stats.alignedReadCount, readGroupSetInfo.numAlignedReads) self.assertEqual( gaReadGroupSet.stats.unalignedReadCount, readGroupSetInfo.numUnalignedReads) for readGroup in readGroupSet.getReadGroups(): gaReadGroup = readGroup.toProtocolElement() self.assertEqual( readGroup.getNumAlignedReads(), -1) self.assertEqual( readGroup.getNumUnalignedReads(), -1) self.assertEqual( gaReadGroup.stats.alignedReadCount, -1) self.assertEqual( gaReadGroup.stats.unalignedReadCount, -1) def testValidateObjects(self): # test that validation works on read groups and reads readGroupSet = self._gaObject for readGroup in readGroupSet.getReadGroups(): self.assertValid( protocol.ReadGroup, readGroup.toProtocolElement().toJsonDict()) for reference in self._referenceSet.getReferences(): for gaAlignment in readGroup.getReadAlignments(reference): self.assertValid( protocol.ReadAlignment, gaAlignment.toJsonDict()) def testGetReadAlignmentsRefId(self): # test that searching with a reference id succeeds readGroupSet = self._gaObject for readGroup in readGroupSet.getReadGroups(): readGroupInfo = self._readGroupInfos[readGroup.getLocalId()] for name, alignments in readGroupInfo.mappedReads.items(): reference = self._referenceSet.getReferenceByName(name) self.assertAlignmentListsEqual( list(readGroup.getReadAlignments(reference)), alignments, readGroupInfo) def testGetReadAlignmentsStartEnd(self): # test that searching with start and end coords succeeds readGroupSet = self._gaObject for readGroup in readGroupSet.getReadGroups(): readGroupInfo = self._readGroupInfos[readGroup.getLocalId()] for name, alignments, in readGroupInfo.mappedReads.items(): bigNumThatPysamWontChokeOn = 2**30 reference = self._referenceSet.getReferenceByName(name) gaAlignments = list(readGroup.getReadAlignments( reference, 0, bigNumThatPysamWontChokeOn)) self.assertAlignmentListsEqual( gaAlignments, alignments, readGroupInfo) def testGetReadAlignmentSearchRanges(self): # test that various range searches work readGroupSet = self._gaObject for readGroup in readGroupSet.getReadGroups(): readGroupInfo = self._readGroupInfos[readGroup.getLocalId()] for name in readGroupInfo.mappedReads.keys(): reference = self._referenceSet.getReferenceByName(name) alignments = list(readGroup.getReadAlignments(reference)) length = len(alignments) if length < 2: continue positions = [read.alignment.position.position for read in alignments] if length != len(set(positions)): continue begin = positions[0] end = positions[-1] self.assertGetReadAlignmentsRangeResult( readGroup, reference, begin, end + 1, length) self.assertGetReadAlignmentsRangeResult( readGroup, reference, begin, end, length - 1) self.assertGetReadAlignmentsRangeResult( readGroup, reference, begin, begin, 0) def assertGetReadAlignmentsRangeResult( self, readGroup, reference, start, end, result): alignments = list(readGroup.getReadAlignments(reference, start, end)) self.assertEqual(len(alignments), result) def assertAlignmentListsEqual( self, gaAlignments, pysamAlignments, readGroupInfo): for gaAlignment, pysamAlignment in utils.zipLists( gaAlignments, pysamAlignments): self.assertAlignmentsEqual( gaAlignment, pysamAlignment, readGroupInfo) def assertAlignmentsEqual(self, gaAlignment, pysamAlignment, readGroupInfo): if pysamAlignment.query_qualities is None: self.assertEqual(gaAlignment.alignedQuality, []) else: self.assertEqual( gaAlignment.alignedQuality, list(pysamAlignment.query_qualities)) 
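        # The assertions below walk each exposed field of the GA4GH
        # ReadAlignment and compare it against the corresponding attribute
        # of the raw pysam AlignedSegment record.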
self.assertEqual( gaAlignment.alignedSequence, pysamAlignment.query_sequence) self.assertEqual( gaAlignment.alignment.mappingQuality, pysamAlignment.mapping_quality) self.assertEqual( gaAlignment.alignment.position.referenceName, readGroupInfo.samFile.getrname(pysamAlignment.reference_id)) self.assertEqual( gaAlignment.alignment.position.position, pysamAlignment.reference_start) # TODO test reverseStrand on position and on nextMatePosition once # it has been implemented. self.assertCigarEqual( gaAlignment.alignment.cigar, pysamAlignment.cigar) self.assertFlag( gaAlignment.duplicateFragment, pysamAlignment, reads.SamFlags.DUPLICATE_FRAGMENT) self.assertFlag( gaAlignment.failedVendorQualityChecks, pysamAlignment, reads.SamFlags.FAILED_VENDOR_QUALITY_CHECKS) self.assertEqual( gaAlignment.fragmentLength, pysamAlignment.template_length) self.assertEqual( gaAlignment.fragmentName, pysamAlignment.query_name) compoundId = datamodel.ReadAlignmentCompoundId( readGroupInfo.gaReadGroup.getCompoundId(), pysamAlignment.query_name) self.assertEqual(gaAlignment.id, str(compoundId)) self.assertEqual( gaAlignment.info, {key: [str(value)] for key, value in pysamAlignment.tags}) if pysamAlignment.next_reference_id != -1: self.assertEqual( gaAlignment.nextMatePosition.position, pysamAlignment.next_reference_start) self.assertEqual( gaAlignment.nextMatePosition.referenceName, readGroupInfo.samFile.getrname( pysamAlignment.next_reference_id)) else: self.assertIsNone(gaAlignment.nextMatePosition) self.assertFlag( gaAlignment.properPlacement, pysamAlignment, reads.SamFlags.PROPER_PLACEMENT) self.assertEqual( gaAlignment.readGroupId, readGroupInfo.id) self.assertFlag( gaAlignment.secondaryAlignment, pysamAlignment, reads.SamFlags.SECONDARY_ALIGNMENT) self.assertFlag( gaAlignment.supplementaryAlignment, pysamAlignment, reads.SamFlags.SUPPLEMENTARY_ALIGNMENT) # TODO test readNumber and numberReads (nice naming guys...) once # we have figured out what they mean and how the map back to # the SAM flags 0x1, 0x40 and 0x80 def assertFlag(self, gaAlignmentAttr, pysamAlignment, mask): flagSet = reads.SamFlags.isFlagSet(pysamAlignment.flag, mask) self.assertEqual(gaAlignmentAttr, flagSet) def assertCigarEqual(self, gaCigar, pysamCigar): self.assertEqual(len(gaCigar), len(pysamCigar)) for i, gaCigarUnit in enumerate(gaCigar): operation, length = pysamCigar[i] gaCigarUnitOperation = reads.SamCigar.ga2int( gaCigarUnit.operation) self.assertEqual( gaCigarUnitOperation, operation) self.assertEqual( gaCigarUnit.operationLength, length)
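# Note: testReads() above is a nose-style test generator, yielding one test
# case per BAM file found under tests/data/dataset1/reads.  A minimal usage
# sketch, assuming nose is installed and the test data tree is present:
#
#   nosetests tests/datadriven/test_reads.py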
ekalosak/server
tests/datadriven/test_reads.py
Python
apache-2.0
16772
[ "pysam" ]
385ec16ae035c979635f2e6e1a6c289cce5a4bb24921841b9fe3c3f87155c043
##################################################################### # # # Refinement Script with SHELX and CCP4/REFMAC5 # # # # Copyright: Molecular Images 2005 # # # # This script is distributed under the same conditions as MIFit # # # ##################################################################### import sys import os import time import string import math import dircache import getopt import ccp4check def Usage(): print "Usage: %s [options]" % sys.argv[0] print "Options are:" print " -p,--pdbfile=FILE the pdb file" print " -m,--mtzfile=FILE the mtz file" print " -l,--libfile=FILE the library file. Default: no file" print " -d,--workdir=DIR The working directory" print " -e,--engine=ENGINE One of refmac5 (default), shelx, or rigid" print " -w,--weight=NUM The weighting factor. Default: 0.1" print " -c,--cycles=NUM Number of refinement cycles to run" print " --water_cycles=NUM Number of water cycles to run. Default: 0" print " -t,--tls_file=FILE TLS specification file. Default: no file" print " -s,--shelx_dir=DIR Path to shelx executables. Default: $SHELXBIN" print " -h,--mifithome=DIR Path to MIFit. Default: no path" print " --bref_type=TYPE B-factor refinement type: anisotropic or none. Default: none" print " --max_res=NUM Maximum resolution. Default: no value" print " -?,--help this help" def Run(argv=None): if argv is None: argv=sys.argv # Path to MIFit installation to find phi-psi data mifit_root = 'none' # Initialize quote = """'""" job_prefix = 'refine_' pdbfile = 'none' mtzfile = 'none' libfile = 'none' workingdir = 'none' runid = '1' projectlog = 'project_history.txt' ref_engine = 'refmac5' flabel = 'none' sigflabel = 'none' rfreelabel = 'none' anomlabel = 'none' siganomlabel = 'none' resolution_mtz = 'none' rwork = 'none' rfree = 'none' rmsd_bonds = 'none' percent_phi_psi = 'none' percent_rotamer = 'none' shelxpro = 'none' shelxh = 'none' max_res = 'none' weight = 'none' bref_type = 'none' freeflag = '0' number_molecules = '1' res_number_X_high = 0 int_res_number = 0 number_sym_ops = 1 cycles = '5' water_pick = 'no' water_cycles = 0 big_cycles = 1 water_count = 0 missing_protein_chain = 'no' validate = 'yes' max_conformers = 10 disorder = 'no' extra_links = 'no' resolution_output = '?' tlsfile = 'none' shelx_directory = 'none' seq_chain_prev = '?' 
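    # Output-file bookkeeping and the per-category outlier lists that the
    # validation pass fills in (bonds, angles, contacts, chirals, cis
    # peptides, Ramachandran, omega, rotamer, density and disorder).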
filename_log_full = 'none' filename_pdb_full = 'none' filename_mtz_full = 'none' filename_refmac_full = 'none' filename_anom_full = 'none' number_chain_list = 0 aList_chains = [] aList_nterm = [] aList_cterm = [] parseLine = [] aLine = [] labelList = [] colList = [] aList_chain_store = [] aList_res_number_store = [] aList_res_name_store = [] aList_sequence_chain_id = [] pdb_annotate = [] aList_sequence_chain = [] aList_sequence_number = [] aList_sequence_resname = [] aList_missing_residues = [] aList_current_residue_atoms = [] aList_allatoms_chain = [] aList_allatoms_res_number = [] aList_allatoms_res_name = [] aList_allatoms_atom_name = [] aList_SEQRES = [] aList_rotamer_chain = [] aList_rotamer_resno = [] aList_rotamer_resname = [] aList_bonds_chain = [] aList_bonds_resno = [] aList_bonds_resname = [] aList_angles_chain = [] aList_angles_resno = [] aList_angles_resname = [] aList_contacts_chain = [] aList_contacts_resno = [] aList_contacts_resname = [] aList_chiral_chain = [] aList_chiral_resno = [] aList_chiral_resname = [] aList_cis_chain = [] aList_cis_resno = [] aList_cis_resname = [] aList_rama_chain = [] aList_rama_resno = [] aList_rama_resname = [] aList_omega_chain = [] aList_omega_resno = [] aList_omega_resname = [] aList_density_chain = [] aList_density_resno = [] aList_density_resname = [] aList_disorder_chain = [] aList_disorder_resno = [] aList_disorder_resname = [] aList_errors = [] aList_phi_all = [] aList_psi_all = [] aList_phipsi_prob_all = [] aList_phi_gly = [] aList_psi_gly = [] aList_phipsi_prob_gly = [] aList_phi_pro = [] aList_psi_pro = [] aList_phipsi_prob_pro = [] aList_peak_x = [] aList_peak_y = [] aList_peak_z = [] aList_x = [] aList_y = [] aList_z = [] bond_list = 'no' angle_list = 'no' contact_list = 'no' chiral_list = 'no' iteration_final = 'no' chain_id_prev = '?' res_number_prev = '?' 
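    # Validation counters, the omega/phi-psi outlier thresholds, and the
    # reference atom-name lists for each standard amino acid type.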
num_residues = 0.0 amino_acid_count = 0.0 count_phipsi = 0.0 count_rotamer = 0.0 phipsi_gen_datafile = 'none' phipsi_gly_datafile = 'none' phipsi_pro_datafile = 'none' number_phipsi_gen_table = 0 number_phipsi_gly_table = 0 number_phipsi_pro_table = 0 # Omega threshhold is 4 x true sd from peak at 178.9 # allowed phi-psi: gen 99.95% data, 41.5% area, GLY 99.8% data, 63% area, PRO 99.8% data, 18.1% area omega_peak = 178.9 omega_thresh = 4.0 * 5.6 phipsi_thresh_gen = 0.00847 phipsi_thresh_gly = 0.00384 phipsi_thresh_pro = 0.0015 # Atom lists aList_GLY_atoms = ['N','CA','C','O'] aList_ALA_atoms = ['N','CA','C','O','CB'] aList_VAL_atoms = ['N','CA','C','O','CB','CG1','CG2'] aList_ILE_atoms = ['N','CA','C','O','CB','CG1','CG2','CD1'] aList_LEU_atoms = ['N','CA','C','O','CB','CG','CD1','CD2'] aList_PHE_atoms = ['N','CA','C','O','CB','CG','CD1','CE1','CZ','CE2','CD2'] aList_PRO_atoms = ['N','CA','C','O','CB','CG','CD'] aList_MET_atoms = ['N','CA','C','O','CB','CG','SD','CE'] aList_TRP_atoms = ['N','CA','C','O','CB','CG','CD1','NE1','CE2','CD2','CE3','CZ3','CH2','CZ2'] aList_CYS_atoms = ['N','CA','C','O','CB','SG'] aList_SER_atoms = ['N','CA','C','O','CB','OG'] aList_THR_atoms = ['N','CA','C','O','CB','OG1','CG2'] aList_ASN_atoms = ['N','CA','C','O','CB','CG','OD1','ND2'] aList_GLN_atoms = ['N','CA','C','O','CB','CD','CG','OE1','NE2'] aList_TYR_atoms = ['N','CA','C','O','CB','CG','CD1','CE1','CZ','OH','CE2','CD2'] aList_HIS_atoms = ['N','CA','C','O','CB','CG','ND1','CE1','NE2','CD2'] aList_ASP_atoms = ['N','CA','C','O','CB','CG','OD1','OD2'] aList_GLU_atoms = ['N','CA','C','O','CB','CD','CG','OE1','OE2'] aList_LYS_atoms = ['N','CA','C','O','CB','CG','CD','CE','NZ'] aList_ARG_atoms = ['N','CA','C','O','CB','CD','CG','NE','CZ','NH1','NH2'] # Platform test_platform = sys.platform # Read args number_of_args = len(argv) args = argv[1:] optlist, args = getopt.getopt( args,'p:m:l:d:e:w:c:t:s:h:?', ["pdbfile=","mtzfile=","libfile=","workdir=","engine=", "weight=","cycles=","water_cycles=", "tls_file=","shelx_dir=","mifithome=", "bref_type=","max_res=","help"]) number_of_inputs = len(optlist) if number_of_inputs==0: Usage() return count = 0 while count < number_of_inputs: aList = optlist[count] number_of_list_inputs = len(aList) arg_value="" if number_of_list_inputs >=1: arg_value = aList[0] if arg_value == '-?' 
or arg_value=='--help':
                Usage()
                return

        if number_of_list_inputs >=2:
            param_value = aList[1]

            if arg_value == '-p' or arg_value=="--pdbfile":
                pdbfile = param_value
            elif arg_value == '-m' or arg_value=="--mtzfile":
                mtzfile = param_value
            elif arg_value == '-l' or arg_value=="--libfile":
                libfile = param_value
            elif arg_value == '-d' or arg_value=="--workdir":
                workingdir = param_value
            elif arg_value == '-e' or arg_value=="--engine":
                ref_engine = param_value
            elif arg_value == '-w' or arg_value=="--weight":
                weight = param_value
            elif arg_value == '--max_res':
                max_res = param_value
            elif arg_value == '--bref_type':
                bref_type = param_value
            elif arg_value == '-c' or arg_value=="--cycles":
                cycles = param_value
            elif arg_value == '--water_cycles':
                water_cycles = int(param_value)
            elif arg_value == '-t' or arg_value=="--tls_file":
                tlsfile = param_value
            elif arg_value == '-s' or arg_value=="--shelx_dir":
                shelx_directory = param_value
            elif arg_value == '-h' or arg_value=="--mifithome":
                mifit_root = param_value

        count = count + 1

    ccp4,error = ccp4check.ccp4check()
    if not ccp4:
        print '\n' + error + '\n'
        time.sleep(4)
        return 1

    fileexists = os.path.exists(pdbfile)
    if fileexists == 0:
        print 'The PDB file was not found ',pdbfile
        time.sleep(4)
        return 1

    fileexists = os.path.exists(mtzfile)
    if fileexists == 0:
        print 'The MTZ file was not found ',mtzfile
        time.sleep(4)
        return 1

    fileexists = os.path.exists(workingdir)
    if fileexists == 0:
        print 'The working directory was not found ',workingdir
        time.sleep(4)
        return 1

    fileexists = os.path.exists(libfile)
    if fileexists == 0 and os.path.basename(libfile) != 'none':
        print 'The library file was not found ',libfile
        time.sleep(4)
        return 1

    fileexists = os.path.exists(tlsfile)
    if fileexists == 0 and os.path.basename(tlsfile) != 'none':
        print 'The TLS specification file was not found ',tlsfile
        time.sleep(4)
        return 1

    if ref_engine != 'shelx' and ref_engine != 'rigid' and ref_engine != 'refmac5':
        print 'The refinement type must be one of shelx/rigid/refmac5'
        time.sleep(4)
        return 1

    if weight == 'none':
        weight = '0.1'

    if water_cycles > 0:
        water_pick = 'yes'
        big_cycles = water_cycles + 1

    if bref_type == 'none':
        bref_type = 'isotropic'

    # Check MIFit installation to access phi-psi data files (try environment variable, default and direct path)
    if mifit_root != 'none':
        mifit_root_data = os.path.join(mifit_root,'data')
        phipsi_gen_datafile = os.path.join(mifit_root_data,'rama500-general.data')
        phipsi_gly_datafile = os.path.join(mifit_root_data,'rama500-gly-sym-nosec.data')
        phipsi_pro_datafile = os.path.join(mifit_root_data,'rama500-pro.data')

    # If needed, determine the SHELX bin directory path
    if ref_engine == 'shelx':

        # Determine installation from a possible environment variable
        find_shelx_bin = os.environ.keys().count('SHELXBIN')
        if find_shelx_bin != 0:
            shelx_directory = os.environ['SHELXBIN']

        # Determine from input parameter
        if shelx_directory != 'none':
            fileexists = os.path.exists(shelx_directory)
            if fileexists != 0:
                if test_platform.find('win') > -1:
                    shelxpro = os.path.join(shelx_directory,'shelxpro.exe')
                    shelxh = os.path.join(shelx_directory,'shelxh.exe')
                else:
                    shelxpro = os.path.join(shelx_directory,'shelxpro')
                    shelxh = os.path.join(shelx_directory,'shelxh')

        # Confirm we have SHELXH and SHELXPRO
        fileexists = os.path.exists(shelxpro)
        if fileexists == 0:
            print 'The SHELXPRO executable was not found ',shelxpro
            time.sleep(4)
            return 1

        fileexists = os.path.exists(shelxh)
        if fileexists == 0:
            print 'The SHELXH executable was not found ',shelxh
            time.sleep(4)
            return 1

    # Create a CCP4 temp space for
temporary files idcode = '000000' gmt = time.gmtime(time.time()) fmt = '%H%M%S' idcode = time.strftime(fmt,gmt) path_scratch = 'temp_' + idcode working_ccp4_scratch = os.path.join(ccp4.scr,path_scratch) fileexists = os.path.exists(working_ccp4_scratch) if fileexists == 0: os.mkdir(working_ccp4_scratch) os.environ['CCP4_SCR'] = working_ccp4_scratch # Go to working area os.chdir(workingdir) # Copy MTZ file to working area file = open(mtzfile,'rb') allLines = file.readlines() file.close() file = open('mi_refine_unsorted.mtz','wb') file.writelines(allLines) file.close() # Use CCP4/CAD to ensure that data is sorted properly for subsequent CCP4/FFT process fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') file = open('mi_cad.inp','w') file.write('LABIN FILE_NUMBER 1 ALL\n') file.write('END\n') file.close() runcad = 'cad HKLIN1 mi_refine_unsorted.mtz HKLOUT mi_refine.mtz < mi_cad.inp > mi_cad.log' os.system(runcad) fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine_unsorted.mtz') os.remove('mi_cad.log') os.remove('mi_cad.inp') else: print 'The CAD run to resort the data seems to have failed' time.sleep(4) return 1 # Extract MTZ labels for F and SD(F) and Rfree set file = open('mi_mtzdump.inp','w') file.write('HEADER\n') file.write('SYMMETRY\n') file.write('END\n') file.close() runmtz = 'mtzdump HKLIN mi_refine.mtz < mi_mtzdump.inp > mi_mtzdump.log' os.system(runmtz) file = open('mi_mtzdump.log','r') allLines = file.readlines() file.close() os.remove('mi_mtzdump.log') os.remove('mi_mtzdump.inp') read_columns = 'no' read_labels = 'no' read_resolution = 'no' read_cell = 'no' for eachLine in allLines: line_length = len(eachLine) if read_columns == 'yes' and line_length > 1: colList = eachLine.split() read_columns = 'no' if read_labels == 'yes' and line_length > 1: labelList = eachLine.split() read_labels = 'no' if read_resolution == 'yes' and line_length > 1: parseLine = eachLine.split() resolution_mtz = parseLine[5] read_resolution = 'no' if read_cell == 'yes' and line_length > 1: parseLine = eachLine.split() acell_mtz = parseLine[0] bcell_mtz = parseLine[1] ccell_mtz = parseLine[2] alpha_mtz = parseLine[3] beta_mtz = parseLine[4] gamma_mtz = parseLine[5] acell_mtz = float(acell_mtz) bcell_mtz = float(bcell_mtz) ccell_mtz = float(ccell_mtz) alpha_mtz = float(alpha_mtz) beta_mtz = float(beta_mtz) gamma_mtz = float(gamma_mtz) acell_mtz = round(acell_mtz,3) bcell_mtz = round(bcell_mtz,3) ccell_mtz = round(ccell_mtz,3) alpha_mtz = round(alpha_mtz,2) beta_mtz = round(beta_mtz,2) gamma_mtz = round(gamma_mtz,2) read_cell = 'no' if eachLine.find('* Number of Symmetry Operations') > -1: parseLine = eachLine.split('=') number_sym_ops = parseLine[1] number_sym_ops = int(number_sym_ops) if eachLine.find('* Column Labels :') > -1: read_columns = 'yes' if eachLine.find('* Column Types :') > -1: read_labels = 'yes' if eachLine.find('Resolution Range') > -1: read_resolution = 'yes' if eachLine.find('* Cell Dimensions :') > -1: read_cell = 'yes' if eachLine.find('* Space Group =') > -1: # SG number and name eachLine = eachLine.strip() parseLine = eachLine.split('=') space_group_out = parseLine[1] parseLine = space_group_out.split(quote) space_group = parseLine[0] space_group = space_group.strip() space_group_mtz = parseLine[1] space_group_mtz = space_group_mtz.strip() list_length = len(labelList) count = 0 while count < list_length: if labelList[count] == 'F' and flabel == 'none': flabel = colList[count] if labelList[count] == 'Q' and 
sigflabel == 'none': sigflabel = colList[count] if labelList[count] == 'I' and rfreelabel == 'none': rfreelabel = colList[count] if labelList[count] == 'D' and anomlabel == 'none': anomlabel = colList[count] if labelList[count] == 'Q' and siganomlabel == 'none': if anomlabel != 'none' and colList[count] != sigflabel: siganomlabel = colList[count] count = count + 1 if flabel == 'none' or sigflabel == 'none' or rfreelabel == 'none': print 'MTZ labels for F,sd(F) or Rfree-data could not be established' time.sleep(4) return 1 # Use CCP4/CAD to capture any anomalous difference data if anomlabel != 'none' and siganomlabel != 'none': fileexists = os.path.exists('mi_anommap.mtz') if fileexists != 0: os.remove('mi_anommap.mtz') file = open('mi_cad.inp','w') file.write('LABIN FILE_NUMBER 1 E1=') file.write(anomlabel) file.write(' E2=') file.write(siganomlabel) file.write('\n') # Avoid issues of the anomalous data exceeding the refinement resolution if max_res != 'none': file.write('RESOLUTION FILE_NUMBER 1 1000.0 ') file.write(max_res) file.write('/n') file.write('END\n') file.close() runcad = 'cad HKLIN1 mi_refine.mtz HKLOUT mi_anommap.mtz < mi_cad.inp > mi_cad.log' os.system(runcad) fileexists = os.path.exists('mi_anommap.mtz') if fileexists != 0: os.remove('mi_cad.log') os.remove('mi_cad.inp') else: print 'The CAD run to extract anomalous difference data seems to have failed' time.sleep(4) return 1 # Set resolution if max_res == 'none': resolution_output = resolution_mtz else: resolution_output = max_res # Copy coordinates to local working area and collect information on content offset = 0 res_count = 0 cryst_found = 'no' file = open(pdbfile,'r') allLines = file.readlines() file.close() # Precheck to look for CRYST1 record for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'CRYST1': cryst_found = 'yes' file = open('mi_refine.pdb','w') # Add CRYST1 record if not present if cryst_found == 'no': acell_mtz = '%.3f'%(acell_mtz) acell_mtz = str(acell_mtz) acell_mtz = acell_mtz.rjust(9) bcell_mtz = '%.3f'%(bcell_mtz) bcell_mtz = str(bcell_mtz) bcell_mtz = bcell_mtz.rjust(9) ccell_mtz = '%.3f'%(ccell_mtz) ccell_mtz = str(ccell_mtz) ccell_mtz = ccell_mtz.rjust(9) alpha_mtz = '%.2f'%(alpha_mtz) alpha_mtz = str(alpha_mtz) alpha_mtz = alpha_mtz.rjust(7) beta_mtz = '%.2f'%(beta_mtz) beta_mtz = str(beta_mtz) beta_mtz = beta_mtz.rjust(7) gamma_mtz = '%.2f'%(gamma_mtz) gamma_mtz = str(gamma_mtz) gamma_mtz = gamma_mtz.rjust(7) aLine = 'CRYST1' + acell_mtz + bcell_mtz + ccell_mtz \ + alpha_mtz + beta_mtz + gamma_mtz + ' ' + space_group_mtz file.write(aLine) file.write('\n') # Read/write file contents seq_chain_prev = '?' 
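    # Single pass over the input PDB: parse SEQRES and chain/terminus
    # information, reconcile the CRYST1 cell with the MTZ cell, fix
    # left-justified ion atom names, and write the cleaned copy
    # (mi_refine.pdb) that the refinement engines will read.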
for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() # Parse SEQRES records into chain,name,dummy-number lists if tag == 'SEQRES': SEQRES = eachLine.strip() aList_SEQRES.append(SEQRES) seq_chain = eachLine[11:12] seq_chain = seq_chain.strip() seqLine = eachLine[19:70] parseLine = seqLine.split() if seq_chain != seq_chain_prev: aList_sequence_chain_id.append(seq_chain) seq_chain_prev = seq_chain length = len(parseLine) count = 0 while count < length: res_name = parseLine[count] aList_sequence_chain.append(seq_chain) aList_sequence_number.append('?') aList_sequence_resname.append(res_name) count = count + 1 # Ensure pdb/mtz cell compatibility and check CRYST1 integrity if tag == 'CRYST1': parseLine = eachLine.split() length = len(parseLine) if length > 7: acell_pdb = parseLine[1] bcell_pdb = parseLine[2] ccell_pdb = parseLine[3] alpha_pdb = parseLine[4] beta_pdb = parseLine[5] gamma_pdb = parseLine[6] acell_pdb = float(acell_pdb) bcell_pdb = float(bcell_pdb) ccell_pdb = float(ccell_pdb) alpha_pdb = float(alpha_pdb) beta_pdb = float(beta_pdb) gamma_pdb = float(gamma_pdb) a_dif = acell_pdb - acell_mtz b_dif = bcell_pdb - bcell_mtz c_dif = ccell_pdb - ccell_mtz alpha_dif = alpha_pdb - alpha_mtz beta_dif = beta_pdb - beta_mtz gamma_dif = gamma_pdb - gamma_mtz a_dif = abs(a_dif) b_dif = abs(b_dif) c_dif = abs(c_dif) alpha_dif = abs(alpha_dif) beta_dif = abs(beta_dif) gamma_dif = abs(gamma_dif) # Fix to mtz values if disagreeing and write a new CRYST1 record if a_dif > 0.1 or b_dif > 0.1 or c_dif > 0.1 \ or alpha_dif > 0.1 or beta_dif > 0.1 or gamma_dif > 0.1: acell_mtz = '%.3f'%(acell_mtz) acell_mtz = str(acell_mtz) acell_mtz = acell_mtz.rjust(9) bcell_mtz = '%.3f'%(bcell_mtz) bcell_mtz = str(bcell_mtz) bcell_mtz = bcell_mtz.rjust(9) ccell_mtz = '%.3f'%(ccell_mtz) ccell_mtz = str(ccell_mtz) ccell_mtz = ccell_mtz.rjust(9) alpha_mtz = '%.2f'%(alpha_mtz) alpha_mtz = str(alpha_mtz) alpha_mtz = alpha_mtz.rjust(7) beta_mtz = '%.2f'%(beta_mtz) beta_mtz = str(beta_mtz) beta_mtz = beta_mtz.rjust(7) gamma_mtz = '%.2f'%(gamma_mtz) gamma_mtz = str(gamma_mtz) gamma_mtz = gamma_mtz.rjust(7) eachLine = 'CRYST1' + acell_mtz + bcell_mtz + ccell_mtz\ + alpha_mtz + beta_mtz + gamma_mtz + ' ' + space_group_mtz # Check atom information if tag == 'ATOM' or tag == 'HETATM': chain_id = eachLine[21:22] chain_id = chain_id.strip() res_number = eachLine[22:26] res_number = res_number.strip() res_name = eachLine[17:20] res_name = res_name.strip() atom_name = eachLine[12:16] # Check and fix potential atom justification issues for common ions atom_justify = 'OK' if atom_name == ' NA ': atom_name = 'NA ' atom_justify = 'notOK' if atom_name == ' MG ': atom_name = 'MG ' atom_justify = 'notOK' if atom_name == ' CL ': atom_name = 'CL ' atom_justify = 'notOK' if atom_name == ' CR ': atom_name = 'CR ' atom_justify = 'notOK' if atom_name == ' MN ': atom_name = 'MN ' atom_justify = 'notOK' if atom_name == ' FE ': atom_name = 'FE ' atom_justify = 'notOK' if atom_name == ' CO ': atom_name = 'CO ' atom_justify = 'notOK' if atom_name == ' NI ': atom_name = 'NI ' atom_justify = 'notOK' if atom_name == ' CU ': atom_name = 'CU ' atom_justify = 'notOK' if atom_name == ' ZN ': atom_name = 'ZN ' atom_justify = 'notOK' if atom_name == ' SE ': atom_name = 'SE ' atom_justify = 'notOK' if atom_name == ' BR ': atom_name = 'BR ' atom_justify = 'notOK' if atom_name == ' CS ': atom_name = 'CS ' atom_justify = 'notOK' if atom_justify == 'notOK': eachLine_fix = eachLine[0:12] + atom_name + eachLine[16:80] eachLine = eachLine_fix # Obtain 
residue (CA) count if eachLine.find(' CA ') > -1: res_count = res_count + 1 # Check for waters if res_name == 'HOH': water_count = water_count + 1 # Get highest residue number in the chain X we will assign for waters if chain_id == 'X': int_res_number = int(res_number) if int_res_number > res_number_X_high: res_number_X_high = int_res_number # Obtain protein chain names and terminii count = 0 found = 'no' if res_name != 'HOH': if chain_id == ' ': missing_protein_chain = 'yes' while count < number_chain_list: if chain_id == aList_chains[count]: found = 'yes' count = count + 1 if found == 'no': aList_nterm.append(res_number) aList_chains.append(chain_id) number_chain_list = len(aList_chains) if number_chain_list > 1: aList_cterm.append(res_number_prev) res_number_prev = res_number # Write record but eliminate SCALE records to avoid issues with changed cells and CISPEP # records since it it better if they are recomputed following refitting if tag != 'SCALE1' and tag != 'SCALE2' and tag != 'SCALE3' and tag != 'CISPEP': eachLine = eachLine.strip() file.write(eachLine) file.write('\n') file.close() aList_cterm.append(res_number_prev) number_chain_list = len(aList_chains) if cryst_found == 'no': print 'There was no CRYST1 record in the coordinate file - stopping\n' time.sleep(4) return 1 # Set water picking defaults depending on molecule size water_add = 0.25*res_count water_add = int(water_add) # Copy library file (if any) to temp area. REFMAC5 read requires a full path. if os.path.basename(libfile) != 'none': file = open(libfile,'r') allLines = file.readlines() file.close() # Check for extra protein-ligand covalent links for eachLine in allLines: if eachLine.find('data_link_list') > -1: extra_links = 'yes' temp_lib = os.path.join(ccp4.scr,'mi_templib.lib') file = open(temp_lib,'w') file.writelines(allLines) file.close() else: temp_lib = 'none' # Set general file names fileexists = os.path.exists(projectlog) if fileexists != 0: file = open(projectlog,'r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('Job ID') > -1 and eachLine.find('refine') > -1: aList = eachLine.split('_') runid = aList[1] runid_int = int(runid) runid_int = runid_int + 1 runid = str(runid_int) job_id = job_prefix + runid # Fix case where there are unrecorded refinement files - get highest serial number filename_pdb = job_id + '.pdb' filename_pdb_full = os.path.join(workingdir,filename_pdb) fileexists = os.path.exists(filename_pdb_full) if fileexists != 0: runid_prev = 0 aList_dir = dircache.listdir(workingdir) number_files = len(aList_dir) count = 0 while count < number_files: afile = aList_dir[count] if afile.find('refine_') > -1 and afile.find('.pdb') > -1: afile_tag = afile.replace('refine_','') afile_tag = afile_tag.replace('.pdb','') if afile_tag.isdigit() == 1: runid_int = int(afile_tag) if runid_int > runid_prev: runid_prev = runid_int runid_int = runid_int + 1 runid = str(runid_int) job_id = job_prefix + runid count = count + 1 # filename_log = job_id + '.log' filename_pdb = job_id + '.pdb' filename_mtz = job_id + '.mtz' filename_tls = job_id + '.tls' errorfile = job_id + '_errors.txt' filename_log_full = os.path.join(workingdir,filename_log) filename_pdb_full = os.path.join(workingdir,filename_pdb) filename_mtz_full = os.path.join(workingdir,filename_mtz) filename_errors_full = os.path.join(workingdir,errorfile) filename_tls_full = os.path.join(workingdir,filename_tls) fileexists = os.path.exists(filename_log) if fileexists != 0: os.remove(filename_log) fileexists = 
os.path.exists(filename_pdb) if fileexists != 0: os.remove(filename_pdb) fileexists = os.path.exists(filename_mtz) if fileexists != 0: os.remove(filename_mtz) fileexists = os.path.exists(filename_tls) if fileexists != 0: os.remove(filename_tls) ################################################# # Start rigid-body refinement (REFMAC5) section # ################################################# if ref_engine == 'rigid': print '\nStarting REFMAC5 rigid-body refinement process' print 'CCP4 scratch space:',working_ccp4_scratch print 'Job-ID:',job_id print 'Using mtz data:',flabel,',',sigflabel,',',rfreelabel # REFMAC specific file names filename_in = job_id + '.inp' filename_refmac_temp = job_id + '.refmac' filename_refmac = job_id + '_cif.txt' filename_refmac_full = os.path.join(workingdir,filename_refmac) # Setup file = open(filename_in,'w') file.write('LABIN FP=') file.write(flabel) file.write(' SIGFP=') file.write(sigflabel) file.write(' FREE=') file.write(rfreelabel) file.write('\nLABOUT FC=FC PHIC=PHIC DELFWT=DELFWT PHDELWT=PHDELFWT FWT=FWT FOM=FOM\n') if max_res != 'none': file.write('RESOLUTION 100.0 ') file.write(max_res) file.write('\n') file.write('FREE ') file.write(freeflag) file.write('\nREFI TYPE RIGID\n') file.write('REFI RESI MLKF\n') file.write('REFI BREF OVERall METH CGMAT\n') file.write('SCAL TYPE BULK LSSC ANIS FIXBulk SCBUlk 0.78 BBULK 180.0\n') file.write('SOLVENT NO\n') file.write('MAKE_RESTRAINTS HYDR N\n') file.write('RIGIDbody NCYC 12\n') # Set group definitions by chain-id count = 0 while count < number_chain_list: group_number = count + 1 group_number = str(group_number) chain_id = aList_chains[count] nterm = aList_nterm[count] cterm = aList_cterm[count] file.write('RIGIDbody GROUP ') file.write(group_number) file.write(' FROM ') file.write(nterm) file.write(' ') file.write(chain_id) file.write(' TO ') file.write(cterm) file.write(' ') file.write(chain_id) file.write('\n') count = count + 1 # file.write('MONI FEW\n') file.write('BINS 10\n') file.write('USECWD\n') file.write('PNAME noid\n') file.write('DNAME ') file.write(job_id) file.write('\n') file.write('END\n') file.close() # Run runrefine = 'refmac5 HKLIN mi_refine.mtz XYZIN mi_refine.pdb XYZOUT mi_refine_out.pdb HKLOUT '\ + filename_mtz + ' < ' + filename_in + ' > ' + filename_log os.system(runrefine) # Clean-up and rename os.remove(filename_in) fileexists = os.path.exists('mi_refine_out.pdb') if fileexists != 0: os.rename('mi_refine_out.pdb',filename_pdb) print 'Output PDB file:',filename_pdb else: 'REFMAC5 rigid-body o/p coordinate file was not found' time.sleep(4) return 1 fileexists = os.path.exists(filename_mtz) if fileexists != 0: print 'Output MTZ file:',filename_mtz else: 'REFMAC rigid-body o/p phased data file was not found' time.sleep(4) return 1 fileexists = os.path.exists(filename_refmac) if fileexists != 0: os.remove(filename_refmac) fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') fileexists = os.path.exists(filename_refmac_temp) if fileexists != 0: os.rename(filename_refmac_temp,filename_refmac) print 'Output CIF log file:',filename_refmac # Parse global summary file = open(filename_refmac,'r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('_refine.ls_R_factor_R_work') > -1: parseLine = eachLine.split() rwork = parseLine[1] if eachLine.find('_refine.ls_R_factor_R_free') > -1: parseLine = eachLine.split() rfree = parseLine[1] else: print 'REFMAC o/p CIF log file was not found' return 1 fileexists = 
os.path.exists(filename_log) if fileexists != 0: print 'Output REFMAC5 log:',filename_log else: print 'The REFMAC5 log file was not found' time.sleep(4) return 1 print 'Rwork=',rwork,' Rfree=',rfree ########################### # Start REFMAC5 section # ########################### if ref_engine == 'refmac5': print '\nStarting REFMAC5 process' print 'CCP4 scratch space:',working_ccp4_scratch print 'Job-ID:',job_id print 'Using mtz data:',flabel,',',sigflabel,',',rfreelabel if os.path.basename(libfile) != 'none': print 'Using library file:',libfile # REFMAC specific file names filename_in = job_id + '.inp' filename_refmac_temp = job_id + '.refmac' filename_refmac = job_id + '_cif.txt' filename_refmac_full = os.path.join(workingdir,filename_refmac) # Establish TLS file if os.path.basename(tlsfile) != 'none': file = open(tlsfile,'r') allLines = file.readlines() file.close() file = open('mi_temp.tls','w') file.writelines(allLines) file.close() tls_files = ' TLSIN mi_temp.tls TLSOUT ' + filename_tls + ' ' else: tls_files = ' ' # Setup file = open(filename_in,'w') file.write('LABIN FP=') file.write(flabel) file.write(' SIGFP=') file.write(sigflabel) file.write(' FREE=') file.write(rfreelabel) file.write('\nLABOUT FC=FC PHIC=PHIC DELFWT=DELFWT PHDELWT=PHDELFWT FWT=FWT FOM=FOM\n') file.write('FREE ') file.write(freeflag) file.write('\n') # Options/defaults if max_res != 'none': file.write('RESOLUTION 100.0 ') file.write(max_res) file.write('\n') if bref_type == 'anisotropic': file.write('REFI BREF ANISotropic METH CGMAT\n') else: file.write('REFI BREF ISOT METH CGMAT\n') # Standard setup file.write('SCAL TYPE SIMPLE LSSC ANIS\n') file.write('SOLVENT YES\n') file.write('REFI TYPE RESTtrained\n') file.write('REFI RESI MLKF\n') # TLS option - set uniform B, establish TLS then refine residual B-factors if os.path.basename(tlsfile) != 'none': file.write('REFI TLSC 20\n') file.write('BFAC SET 30.0\n') file.write('WEIGH MATRIX ') file.write(weight) file.write('\n') if extra_links == 'yes': file.write('MAKE_RESTRAINTS LINK Y\n') file.write('MAKE_RESTRAINTS CISP Y\n') file.write('MAKE_RESTRAINTS SS Y\n') file.write('MAKE_RESTRAINTS HYDR N\n') file.write('BFAC 1 2.0 2.5 3.0 4.5\n') file.write('NCYC ') file.write(cycles) file.write('\n') # validation monitor file.write('MONI DIST 6.0\n') file.write('MONI ANGL 8.0\n') file.write('MONI TORSION 10.0\n') file.write('MONI PLANE 10.0\n') file.write('MONI VANderwaals 4.25\n') file.write('MONI CHIRAL 8.0\n') file.write('MONI BFACTOR 4.0\n') file.write('BINS 20\n') file.write('USECWD\n') file.write('PNAME noid\n') file.write('DNAME ') file.write(job_id) file.write('\n') file.write('END\n') file.close() # Run process over number of big cycles count = 0 while count < big_cycles: print 'Refining' if os.path.basename(libfile) == 'none': runrefine = 'refmac5 HKLIN mi_refine.mtz XYZIN mi_refine.pdb XYZOUT mi_refine_out.pdb HKLOUT '\ + filename_mtz + tls_files + ' < ' + filename_in + ' > ' + filename_log else: runrefine = 'refmac5 HKLIN mi_refine.mtz XYZIN mi_refine.pdb LIBIN ' + temp_lib + \ ' XYZOUT mi_refine_out.pdb HKLOUT ' + filename_mtz + tls_files + ' < ' + filename_in + ' > ' + filename_log os.system(runrefine) # Check run completed fileexists = os.path.exists(filename_mtz) if fileexists == 0: print 'REFMAC o/p phased data file was not found' time.sleep(4) return 1 fileexists = os.path.exists('mi_refine_out.pdb') if fileexists == 0: print 'REFMAC o/p coordinate file was not found' time.sleep(4) return 1 # # Apply water picking option # if water_pick == 'yes' and count 
< water_cycles: print 'Water picking' # 1FF map file = open('mi_fft.inp','w') file.write('LABIN F1=DELFWT PHI=PHDELFWT\n') file.write('END\n') file.close() runfft = 'fft HKLIN ' + filename_mtz + ' MAPOUT mi_1ff.map < mi_fft.inp > mi_fft.log' os.system(runfft) fileexists = os.path.exists('mi_1ff.map') if fileexists == 0: print 'FFT for water picking failed' time.sleep(4) return 1 else: os.remove('mi_fft.inp') os.remove('mi_fft.log') # Setup crystal araound the protein file = open('mi_mapmask.inp','w') file.write('BORDER 5.0\n') file.write('EXTEND XTAL\n') file.write('END\n') file.close() runmapmask = 'mapmask XYZIN mi_refine_out.pdb MAPIN mi_1ff.map MAPOUT mi_1ff_masked.map < mi_mapmask.inp > mi_mapmask.log' os.system(runmapmask) fileexists = os.path.exists('mi_1ff_masked.map') if fileexists == 0: print 'MAPMASK for water picking failed' time.sleep(4) return 1 else: os.remove('mi_mapmask.inp') os.remove('mi_mapmask.log') os.remove('mi_1ff.map') # Water peak picking file = open('mi_peakmax.inp','w') file.write('THRESHOLD RMS 4.0\n') file.write('OUTPUT PDB\n') file.write('BFACTOR 30.0 1.0\n') file.write('RESIDUE HOH\n') file.write('ATNAME O\n') file.write('CHAIN X\n') file.write('NUMPEAKS 500\n') file.write('EXCLUDE EDGE\n') file.write('END\n') file.close() runpeakmax = 'peakmax MAPIN mi_1ff_masked.map XYZOUT mi_refine_peaks.pdb < mi_peakmax.inp > mi_peakmax_wat.log' os.system(runpeakmax) fileexists = os.path.exists('mi_refine_peaks.pdb') if fileexists == 0: print 'PEAKMAX run failed' time.sleep(4) return 1 # Water peak reduction by symmetry and protein proximity file = open('mi_watpeak.inp','w') file.write('DISTANCE 2.3 3.5\n') file.write('CHAIN X\n') file.write('SYMMETRY ') file.write(space_group) file.write('\nEND\n') file.close() runwatpeak = 'watpeak XYZIN mi_refine_out.pdb PEAKS mi_refine_peaks.pdb XYZOUT mi_refine_wat.pdb < mi_watpeak.inp > mi_watpeak.log' os.system(runwatpeak) fileexists = os.path.exists('mi_refine_wat.pdb') if fileexists == 0: print 'WATPEAK run failed' time.sleep(4) return 1 # Capture water atom records up to limit # Adjust for ascending numbering within the water chain file = open('mi_refine_wat.pdb','r') allLines = file.readlines() file.close() aList_waters = [] water_pick_counter = 1 for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': water_pick_counter = water_pick_counter + 1 if water_pick_counter < water_add: res_number_X_high = res_number_X_high + 1 str_res_number = str(res_number_X_high) str_res_number = str_res_number.rjust(4) atom_water_record = eachLine[0:22] + str_res_number + eachLine[26:80] atom_water_record = atom_water_record.strip() aList_waters.append(atom_water_record) # Clean-up coordinate file debris from water picking fileexists = os.path.exists('mi_refine_peaks.pdb') if fileexists != 0: os.remove('mi_refine_peaks.pdb') fileexists = os.path.exists('mi_refine_wat.pdb') if fileexists != 0: os.remove('mi_refine_wat.pdb') # Rewrite current PDB file ready to append new waters file = open('mi_refine_out.pdb','r') allLines = file.readlines() file.close() os.remove('mi_refine_out.pdb') file = open('mi_refine_out.pdb','w') for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag != 'END' and tag != 'CONECT': file.write(eachLine) # Add new waters number_water_list = len(aList_waters) count_rec = 0 while count_rec < number_water_list: aLine = aList_waters[count_rec] file.write(aLine) file.write('\n') count_rec = count_rec + 1 file.write('END\n') file.close() print 'Number of waters added: 
',number_water_list # Clean-up os.remove('mi_1ff_masked.map') os.remove('mi_peakmax.inp') os.remove('mi_peakmax_wat.log') os.remove('mi_watpeak.inp') os.remove('mi_watpeak.log') # # end of water picking option # os.remove('mi_refine.pdb') os.rename('mi_refine_out.pdb','mi_refine.pdb') count = count + 1 # Clean-up and rename os.remove(filename_in) print 'Output MTZ file:',filename_mtz fileexists = os.path.exists('mi_refine.pdb') if fileexists != 0: os.rename('mi_refine.pdb',filename_pdb) print 'Output PDB file:',filename_pdb else: 'REFMAC5 o/p coordinate file was not found' time.sleep(4) return 1 fileexists = os.path.exists(filename_refmac) if fileexists != 0: os.remove(filename_refmac) fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') fileexists = os.path.exists('mi_temp.tls') if fileexists != 0: os.remove('mi_temp.tls') fileexists = os.path.exists(filename_refmac_temp) if fileexists != 0: os.rename(filename_refmac_temp,filename_refmac) print 'Output CIF log file:',filename_refmac # Parse global summary file = open(filename_refmac,'r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('_refine.ls_R_factor_R_work') > -1: parseLine = eachLine.split() rwork = parseLine[1] if eachLine.find('_refine.ls_R_factor_R_free') > -1: parseLine = eachLine.split() rfree = parseLine[1] if eachLine.find('r_bond_refined_d') > -1: parseLine = eachLine.split() rmsd_bonds = parseLine[2] else: 'REFMAC o/p CIF log file was not found' return 1 fileexists = os.path.exists(filename_log) if fileexists != 0: print 'Output REFMAC5 log:',filename_log else: print 'The REFMAC5 log file was not found' time.sleep(4) return 1 print 'Rwork=',rwork,' Rfree=',rfree,' RMSD(bonds)=',rmsd_bonds ########################## # End of REFMAC5 section # ########################## ####################### # Start SHELX section # ####################### if ref_engine == 'shelx': print '\nStarting SHELX refinement process' print 'CCP4 scratch space:',working_ccp4_scratch print 'Job-ID:',job_id print 'Using mtz data:',flabel,',',sigflabel if missing_protein_chain == 'yes': print 'Chain identifiers must be assigned for all protein atoms' time.sleep(4) return 1 if os.path.basename(libfile) != 'none': print 'Using library file:',libfile filelib = open(libfile,'r') allLiblines = filelib.readlines() filelib.close() # Base .hkl and .ins root files name on pdb file name ins_file = job_id + '.ins' hkl_file = job_id + '.hkl' ins_file_full = os.path.join(workingdir,ins_file) hkl_file_full = os.path.join(workingdir,hkl_file) filename_lst = job_id + '.lst' filename_res = job_id + '.res' filename_fcf = job_id + '.fcf' filename_mtz = job_id + '.mtz' filename_lst_full = os.path.join(workingdir,filename_lst) filename_res_full = os.path.join(workingdir,filename_res) filename_fcf_full = os.path.join(workingdir,filename_fcf) filename_mtz_full = os.path.join(workingdir,filename_mtz) fileexists = os.path.exists(ins_file) if fileexists != 0: os.remove(ins_file) fileexists = os.path.exists(hkl_file) if fileexists != 0: os.remove(hkl_file) # Setup reflection file in SHELX format file = open('mi_mtz2various.inp','w') file.write('LABIN FP=') file.write(flabel) file.write(' SIGFP=') file.write(sigflabel) file.write(' FREE=') file.write(rfreelabel) file.write('\n') if max_res != 'none': file.write('RESOLUTION 1000.0 ') file.write(max_res) file.write('\n') file.write('OUTPUT SHELX\n') file.write('EXCLUDE FREER 0\n') file.write('END\n') file.close() runmtz = 'mtz2various HKLIN mi_refine.mtz 
HKLOUT mi_refine.hkl < mi_mtz2various.inp > mi_mtz2various.log' os.system(runmtz) fileexists = os.path.exists('mi_mtz2various.inp') if fileexists != 0: os.remove('mi_mtz2various.inp') fileexists = os.path.exists('mi_mtz2various.log') if fileexists != 0: os.remove('mi_mtz2various.log') fileexists = os.path.exists('mi_refine.hkl') if fileexists != 0: file = open('mi_refine.hkl','r') allLines = file.readlines() file.close() os.remove('mi_refine.hkl') file = open(hkl_file,'w') for eachLine in allLines: write_reflection = 'yes' if eachLine.find('TITLE') > -1 or eachLine.find('CELL') > -1 or eachLine.find('ZERR') > -1\ or eachLine.find('LATT') > -1 or eachLine.find('SYMM') > -1 or eachLine.find('HKLF') > -1: write_reflection = 'no' if write_reflection == 'yes': file.write(eachLine) file.close() else: print 'File format conversion for SHELXH seems to have failed' time.sleep(4) return 1 # Setup input coordinates/restraints file (.ins) file with SHELXPRO print 'Running SHELXPRO' fileexists = os.path.exists('mi_shelxpro.inp') if fileexists != 0: os.remove('mi_shelxpro.inp') file = open('mi_shelxpro.inp','w') if test_platform.find('win') > -1: file.write('mi_shelxpro\n') file.write('I\n') file.write('\n') file.write(ins_file) file.write('\nmi_refine.pdb\n') file.write('Written by MIFit\n') file.write('\n') file.write('\n') file.write('\n') file.write('\n') file.write('C\n') # Chain offsets (+1000 etc) since SHELX does not support chains if number_chain_list > 0: count = 0 while count < number_chain_list: file.write('\n') count = count + 1 # List N-terminii final_chain = number_chain_list - 1 count = 0 while count < number_chain_list: nterm = aList_nterm[count] file.write(nterm) if count < final_chain: file.write('=\n') else: file.write('\n') count = count + 1 # List C-terminii count = 0 while count < number_chain_list: cterm = aList_cterm[count] file.write(cterm) if count < final_chain: file.write('=\n') else: file.write('\n') count = count + 1 file.write('\n') file.write('\n') file.write('N\n') file.write('3\n') file.write('\n') file.write('Q\n') file.write('\n') file.close() # Execute SHELXPRO to obtain mi_refine.ins runshelxpro = '"' + shelxpro + '"' + ' < mi_shelxpro.inp > mi_shelxpro.log' os.system(runshelxpro) os.remove('mi_shelxpro.inp') os.remove('mi_shelxpro.log') # Adjust run parameters, insert restraints and rename fileexists = os.path.exists(ins_file) if fileexists != 0: file = open(ins_file,'r') allLines = file.readlines() file.close() os.remove(ins_file) # Read/write to adjust ins file file = open(ins_file,'w') for eachLine in allLines: write_flag = 'no' # Capture number of molecules for PDB write later if eachLine.find('ZERR') > -1: parseLine = eachLine.split() number_molecules = parseLine[1] if eachLine.find('WGHT') > -1: # Insert any extra restraints if os.path.basename(libfile) != 'none': for eachLibline in allLiblines: file.write(eachLibline) file.write('\n') # Insert weight file.write('WGHT ') file.write(weight) if bref_type == 'anisotropic': file.write('\nANIS\n') write_flag = 'yes' # Number of refinement cycles if eachLine.find('CGLS') > -1: file.write('CGLS ') file.write(cycles) file.write('\n') write_flag = 'yes' if write_flag == 'no': file.write(eachLine) file.close() else: print 'SHELXPRO run failed to generate .ins file' time.sleep(4) return 1 fileexists = os.path.exists('mi_shelxpro.pro') if fileexists != 0: os.remove('mi_shelxpro.pro') fileexists = os.path.exists('mi_shelxpro.ps') if fileexists != 0: os.remove('mi_shelxpro.ps') # Execute SHELXH refinement job print 
'Running SHELXH' runshelx = '"' + shelxh + '"' + ' ' + job_id + ' > mi_shelxh.log' os.system(runshelx) fileexists = os.path.exists('mi_shelxh.log') if fileexists != 0: file = open('mi_shelxh.log','r') allLines = file.readlines() file.close() for eachLine in allLines: if eachLine.find('R1') > -1: parseLine = eachLine.split() rwork = parseLine[2] os.rename('mi_shelxh.log',filename_log) else: print 'SHELXH job did not run' return 1 fileexists = os.path.exists(filename_lst) if fileexists == 0: print 'SHELXH lst file was not created' time.sleep(4) return 1 fileexists = os.path.exists(filename_res) if fileexists == 0: print 'SHELXH res file was not created' time.sleep(4) return 1 fileexists = os.path.exists(filename_fcf) if fileexists == 0: print 'SHELXH fcf file was not created' time.sleep(4) return 1 print 'Rwork=',rwork # Append water information (note that waters are renumbered 1,2,3..) if water_count > 0: pr_water_count = str(water_count) aList_chains.append(' ') aList_nterm.append('1') aList_cterm.append(pr_water_count) number_chain_list = number_chain_list + 1 # Back convert to PDB format with SHELXPRO print 'Running SHELXPRO' file = open('mi_shelxpro.inp','w') if test_platform.find('win') > -1: file.write('mi_shelxpro\n') file.write('G\n') file.write('\n') file.write('S\n') file.write(filename_res) file.write('\n') file.write('N\n') file.write('Y\n') file.write('Y\n') file.write('N\n') file.write('K\n') file.write(number_molecules) file.write('\n') file.write('\n') file.write(filename_pdb) file.write('\n') file.write('Written by a MIFit application\n') # Loop over chains to put back correct chain-number pairs if number_chain_list > 0: count = 0 while count < number_chain_list: file.write('$\n') chain_id = aList_chains[count] nterm = aList_nterm[count] cterm = aList_cterm[count] nterm_current = int(nterm) + (count + 1) * 1000 cterm_current = int(cterm) + (count + 1) * 1000 nterm_current = str(nterm_current) cterm_current = str(cterm_current) file.write(chain_id) file.write('\n') file.write('\n') file.write(nterm_current) file.write(' ') file.write(cterm_current) file.write('\n') file.write(nterm) file.write('\n') count = count + 1 file.write('\n') file.write('Q\n') file.write('\n') file.close() # Execute SHELXPRO runshelxpro = '"' + shelxpro + '"' + ' < mi_shelxpro.inp > mi_shelxpro.log' os.system(runshelxpro) fileexists = os.path.exists(filename_pdb) if fileexists == 0: print 'SHELXPRO job failed to generate PDB file' time.sleep(4) return 1 # Back convert the fcf file phased data information into mtz format for easy MIFit load print 'Converting output data to mtz format for MIFit input' print 'Note:FWT contains pre computed 2Fo-Fc map coefficients' print ' and DELFWT contains precomputed Fo-Fc map coefficients' file = open(filename_fcf,'r') allLines = file.readlines() file.close() file = open(hkl_file,'w') for eachLine in allLines: tag = eachLine[0:1] tag = tag.strip() if tag != ' ' and tag != '_' and tag != '#': parseLine = eachLine.split() num_args = len(parseLine) if num_args == 7: h = parseLine[0] k = parseLine[1] l = parseLine[2] fobs_sq = parseLine[3] fcalc = parseLine[5] phase = parseLine[6] h = int(h) k = int(k) l = int(l) fobs_sq = float(fobs_sq) fcalc = float(fcalc) phase = float(phase) fobs = math.sqrt(fobs_sq) twofofc = 2.0*fobs - fcalc twofofc = round(twofofc,3) fofc = fobs - fcalc fofc = round(fofc,3) aLine = str(h) + ' ' + str(k) + ' ' + str(l) + \ ' ' + str(twofofc) + ' ' + str(fofc) + ' ' + str(phase) + ' ' + str(phase) file.write(aLine) file.write('\n') file.close() # 
Step 2, convert ascii to mtz aLine = str(acell_mtz) + ' ' + str(bcell_mtz) + ' ' + str(ccell_mtz)\ + ' ' + str(alpha_mtz) + ' ' + str(beta_mtz) + ' ' + str(gamma_mtz) file = open('mi_f2mtz.inp','w') file.write('NAME PROJECT Shelx_map_coeffs CRYSTAL 1 DATASET 1\n') file.write('CELL ') file.write(aLine) file.write('\n') file.write('SYMMETRY ') file.write(space_group) file.write('\n') file.write('LABOUT H K L FWT DELFWT PHWT PHDELFWT\n') file.write('CTYPOUT H H H F F P P\n') file.write('END\n') file.close() runf2mtz = 'f2mtz HKLIN ' + hkl_file + ' HKLOUT ' + filename_mtz + ' < mi_f2mtz.inp > mi_f2mtz.log' os.system(runf2mtz) fileexists = os.path.exists(filename_mtz) if fileexists == 0: print 'F2MTZ failed to convert to output mtz file' time.sleep(4) return 1 else: os.remove('mi_f2mtz.inp') os.remove('mi_f2mtz.log') # Clean-up various intermediate files fileexists = os.path.exists(ins_file_full) if fileexists != 0: os.remove(ins_file_full) fileexists = os.path.exists(hkl_file_full) if fileexists != 0: os.remove(hkl_file_full) fileexists = os.path.exists(filename_fcf_full) if fileexists != 0: os.remove(filename_fcf_full) fileexists = os.path.exists(filename_res_full) if fileexists != 0: os.remove(filename_res_full) fileexists = os.path.exists('mi_refine.pdb') if fileexists != 0: os.remove('mi_refine.pdb') fileexists = os.path.exists('mi_refine.mtz') if fileexists != 0: os.remove('mi_refine.mtz') fileexists = os.path.exists('mi_shelxpro.pro') if fileexists != 0: os.remove('mi_shelxpro.pro') fileexists = os.path.exists('mi_shelxpro.ps') if fileexists != 0: os.remove('mi_shelxpro.ps') fileexists = os.path.exists('shelxpro.pro') if fileexists != 0: os.remove('shelxpro.pro') fileexists = os.path.exists('shelxpro.ps') if fileexists != 0: os.remove('shelxpro.ps') fileexists = os.path.exists('mi_shelxpro.inp') if fileexists != 0: os.remove('mi_shelxpro.inp') fileexists = os.path.exists('mi_shelxpro.log') if fileexists != 0: os.remove('mi_shelxpro.log') ######################## # End of SHELX section # ######################## ######################## # Structure validation # ######################## if validate == 'yes' and ref_engine == 'refmac5': print 'Checking structure' # Get entity list of current model file = open(filename_pdb,'r') allLines = file.readlines() file.close() chain_id_prev = '?' res_number_prev = '?' 
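        # Walk the refined coordinates once, building chain/residue/atom
        # inventories, noting alternate-conformer (disorder) flags and any
        # non-PRO cis-peptide records.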
for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': chain_id = eachLine[21:22] res_number = eachLine[22:26] res_number = res_number.strip() res_name = eachLine[17:20] res_name = res_name.strip() atom_name = eachLine[12:16] atom_name = atom_name.strip() disorder_id = eachLine[16:17] disorder_id = disorder_id.strip() if disorder_id != '': aList_disorder_chain.append(chain_id) aList_disorder_resno.append(res_number) aList_disorder_resname.append(res_name) # Form all atom list aList_allatoms_chain.append(chain_id) aList_allatoms_res_number.append(res_number) aList_allatoms_res_name.append(res_name) aList_allatoms_atom_name.append(atom_name) # Form residue list if res_name != 'HOH': if chain_id != chain_id_prev or res_number != res_number_prev: aList_chain_store.append(chain_id) aList_res_number_store.append(res_number) aList_res_name_store.append(res_name) chain_id_prev = chain_id res_number_prev = res_number # Identify any non-PRO cis peptide links if tag == 'CISPEP': chain = eachLine[29:30] resnumber = eachLine[32:35] resname = eachLine[25:28] resnumber = resnumber.strip() resname = resname.strip() if resname != 'PRO': aList_cis_chain.append(chain) aList_cis_resno.append(resnumber) aList_cis_resname.append(resname) ####################################### # Parse refmac stereochemical scores # ####################################### file = open(filename_log,'r') allLines = file.readlines() file.close() for eachLine in allLines: # Parse section limits if eachLine.find('****') > -1 or eachLine.find('----') > -1: bond_list = 'no' angle_list = 'no' chiral_list = 'no' contact_list = 'no' chiral_list = 'no' # Start logging on finding final iteration number if eachLine.find('CGMAT cycle number') > -1 and eachLine.find(cycles) > -1: iteration_final = 'yes' # get abnormal bond list if bond_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': aList_split = eachLine.split() num_aLine = len(aList_split) if num_aLine > 2: resnumber = aList_split[1] resname = aList_split[2] aList_bonds_chain.append(chain) aList_bonds_resno.append(resnumber) aList_bonds_resname.append(resname) if eachLine.find('****') or eachLine.find('Limits'): bond_list == 'no' if eachLine.find('Bond distance deviations ') > -1: bond_list = 'yes' # get abnormal bond angle list if angle_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': aList_split = eachLine.split() num_aLine = len(aList_split) if num_aLine > 2: resnumber = aList_split[1] resname = aList_split[2] aList_angles_chain.append(chain) aList_angles_resno.append(resnumber) aList_angles_resname.append(resname) if eachLine.find('****') or eachLine.find('Limits'): angle_list == 'no' if eachLine.find('Bond angle deviations ') > -1: angle_list = 'yes' # get abnormal contacts list if contact_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': aList_split = eachLine.split() num_aLine = len(aList_split) if num_aLine > 10: resnumber = aList_split[1] resname = aList_split[2] disorder1 = aList_split[4] chain2 = aList_split[6] resname2 = aList_split[8] resnumber2 = aList_split[7] disorder2 = aList_split[10] # Skip intra-residue interactions if chain != chain2 or resnumber != resnumber2: if disorder1 == '.' 
and disorder2 == '.': aList_contacts_chain.append(chain) aList_contacts_resno.append(resnumber) aList_contacts_resname.append(resname) aList_contacts_chain.append(chain2) aList_contacts_resno.append(resnumber2) aList_contacts_resname.append(resname2) if eachLine.find('****') or eachLine.find('Limits'): contact_list == 'no' if eachLine.find('VDW deviations ') > -1: contact_list = 'yes' # get severe chiral center violations if chiral_list == 'yes' and iteration_final == 'yes': chain = eachLine[0:1] chain = chain.strip() if chain != '': resname = resname.strip() aList_split = eachLine.split() num_aLine = len(aList_split) if num_aLine > 2: resnumber = aList_split[1] resname = aList_split[2] aList_chiral_chain.append(chain) aList_chiral_resno.append(resnumber) aList_chiral_resname.append(resname) if eachLine.find('****') or eachLine.find('Limits'): chiral_list == 'no' if eachLine.find('Chiral volume deviations') > -1: chiral_list = 'yes' ########################################################### # Run omega check and phi-psi check using Richardson data # ########################################################### # Read Richardson data # General (non-GLY, non-PRO) data fileexists = os.path.exists(phipsi_gen_datafile) if fileexists == 0: print 'WARNING - Unable to locate general phi-psi validation data' else: file = open(phipsi_gen_datafile,'r') allLines = file.readlines() file.close for eachLine in allLines: tag = eachLine[0:1] if tag != '#': aLine = eachLine.split() phi = aLine[0] psi = aLine[1] phipsi_prob = aLine[2] phi = float(phi) psi = float(psi) phipsi_prob = float(phipsi_prob) aList_phi_all.append(phi) aList_psi_all.append(psi) aList_phipsi_prob_all.append(phipsi_prob) number_phipsi_gen_table = len(aList_phi_all) # GLY data fileexists = os.path.exists(phipsi_gly_datafile) if fileexists == 0: print 'WARNING - Unable to locate GLY phi-psi validation data' else: file = open(phipsi_gly_datafile,'r') allLines = file.readlines() file.close for eachLine in allLines: tag = eachLine[0:1] if tag != '#': aLine = eachLine.split() phi = aLine[0] psi = aLine[1] phipsi_prob = aLine[2] phi = float(phi) psi = float(psi) phipsi_prob = float(phipsi_prob) aList_phi_gly.append(phi) aList_psi_gly.append(psi) aList_phipsi_prob_gly.append(phipsi_prob) number_phipsi_gly_table = len(aList_phi_gly) # PRO data fileexists = os.path.exists(phipsi_pro_datafile) if fileexists == 0: print 'WARNING - Unable to locate PRO phi-psi validation data' else: file = open(phipsi_pro_datafile,'r') allLines = file.readlines() file.close for eachLine in allLines: tag = eachLine[0:1] if tag != '#': aLine = eachLine.split() phi = aLine[0] psi = aLine[1] phipsi_prob = aLine[2] phi = float(phi) psi = float(psi) phipsi_prob = float(phipsi_prob) aList_phi_pro.append(phi) aList_psi_pro.append(psi) aList_phipsi_prob_pro.append(phipsi_prob) number_phipsi_pro_table = len(aList_phi_pro) # run SECSTR to compute phi,psi,omega file = open(filename_pdb) allLines = file.readlines() file.close() file = open('mi_secstr.new','w') for eachLine in allLines: tag = eachLine[0:6] tag = tag.strip() if tag == 'ATOM' or tag == 'HETATM': file.write(eachLine) file.close() file = open('mi_secstr.inp','w') file.write('mi_secstr.new\n') file.close() runsecstr = 'secstr < mi_secstr.inp > mi_secstr.log' os.system(runsecstr) fileexists = os.path.exists('mi_secstr.rin') if fileexists == 0: print 'Phi-psi calculation failed' time.sleep(4) return 1 else: file = open('mi_secstr.rin','r') allLines = file.readlines() file.close() os.remove('mi_secstr.new') 
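            # SECSTR scratch files are no longer needed; the .rin torsion
            # records were already read into allLines above.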
            os.remove('mi_secstr.inp')
            os.remove('mi_secstr.log')
            os.remove('mi_secstr.rin')

        for eachLine in allLines:
            res_name = eachLine[4:7]
            res_number = eachLine[9:13]
            chain_id = eachLine[8:9]
            phi = eachLine[15:22]
            psi = eachLine[22:29]
            omega = eachLine[29:36]

            res_name = res_name.strip()
            res_number = res_number.strip()
            chain_id = chain_id.strip()
            phi = phi.strip()
            psi = psi.strip()
            omega = omega.strip()
            omega = float(omega)
            phi = float(phi)
            psi = float(psi)

            amino_acid_count = amino_acid_count + 1.0

            #######################################################
            # Search for outliers versus phi-psi probability data #
            #######################################################

            if phi < 180 and psi < 180:
                lookup = 'yes'
            else:
                lookup = 'no'

            # Jump to useful region of table (minus safety margin)
            phi_point = phi + 180
            phi_point = phi_point * 9.0
            phi_point = math.floor(phi_point) - 19
            phi_point = int(phi_point)

            if phi_point > 0:
                count = phi_point
            else:
                count = 0

            if res_name != 'GLY' and res_name != 'PRO':
                while count < number_phipsi_gen_table and lookup == 'yes':
                    phi_table = aList_phi_all[count]
                    phi_diff = phi - phi_table
                    phi_diff = abs(phi_diff)
                    if phi_diff < 2.0:
                        psi_table = aList_psi_all[count]
                        psi_diff = psi - psi_table
                        psi_diff = abs(psi_diff)
                        if psi_diff < 2.0:
                            phipsi_prob = aList_phipsi_prob_all[count]
                            if phipsi_prob < phipsi_thresh_gen:
                                aList_rama_chain.append(chain_id)
                                aList_rama_resno.append(res_number)
                                aList_rama_resname.append(res_name)
                            lookup = 'no'
                    count = count + 1

            if res_name == 'GLY':
                while count < number_phipsi_gly_table and lookup == 'yes':
                    phi_table = aList_phi_gly[count]
                    phi_diff = phi - phi_table
                    phi_diff = abs(phi_diff)
                    if phi_diff < 2.0:
                        psi_table = aList_psi_gly[count]
                        psi_diff = psi - psi_table
                        psi_diff = abs(psi_diff)
                        if psi_diff < 2.0:
                            phipsi_prob = aList_phipsi_prob_gly[count]
                            if phipsi_prob < phipsi_thresh_gly:
                                aList_rama_chain.append(chain_id)
                                aList_rama_resno.append(res_number)
                                aList_rama_resname.append(res_name)
                            lookup = 'no'
                    count = count + 1

            if res_name == 'PRO':
                while count < number_phipsi_pro_table and lookup == 'yes':
                    phi_table = aList_phi_pro[count]
                    phi_diff = phi - phi_table
                    phi_diff = abs(phi_diff)
                    if phi_diff < 2.0:
                        psi_table = aList_psi_pro[count]
                        psi_diff = psi - psi_table
                        psi_diff = abs(psi_diff)
                        if psi_diff < 2.0:
                            phipsi_prob = aList_phipsi_prob_pro[count]
                            if phipsi_prob < phipsi_thresh_pro:
                                aList_rama_chain.append(chain_id)
                                aList_rama_resno.append(res_number)
                                aList_rama_resname.append(res_name)
                            lookup = 'no'
                    count = count + 1

            #############################
            # Search for omega outliers #
            #############################

            omega = float(omega)
            if omega < 180.0:
                if omega < 0.0:
                    omega = -omega

                omega_deviation = omega_peak - omega
                omega_deviation = abs(omega_deviation)
                if omega_deviation > omega_thresh:
                    aList_omega_chain.append(chain_id)
                    aList_omega_resno.append(res_number)
                    aList_omega_resname.append(res_name)

        ####################################
        # Run sidechain check with ROTAMER #
        ####################################

        file = open('mi_rotamer.inp', 'w')
        file.write('DELT 45\n')
        file.write('END\n')
        file.close()

        runrotamer = 'rotamer XYZIN ' + filename_pdb + ' < mi_rotamer.inp > mi_rotamer.log'
        os.system(runrotamer)

        fileexists = os.path.exists('mi_rotamer.log')
        if fileexists == 0:
            print 'Rotamer validation check failed'
            time.sleep(4)
            return 1
        else:
            # Parse for chi-1 deviations (greater than 45 degrees)
            file = open('mi_rotamer.log', 'r')
            allLines = file.readlines()
            file.close()

            for eachLine in allLines:
                if eachLine.find(')') > -1 and eachLine.find('(') > -1:
                    if eachLine[11:12] == '*':
                        chain_id = eachLine[0:1]
                        residue_id = eachLine[1:5]
                        residue_name = eachLine[6:9]
                        chain_id = chain_id.strip()
                        residue_id = residue_id.strip()
                        residue_name = residue_name.strip()
                        aList_rotamer_chain.append(chain_id)
                        aList_rotamer_resno.append(residue_id)
                        aList_rotamer_resname.append(residue_name)

            os.remove('mi_rotamer.log')
            os.remove('mi_rotamer.inp')

        #################################################
        # Locate difference density features on protein #
        #################################################

        # Calculate 1FF map
        file = open('mi_fft.inp', 'w')
        file.write('LABIN F1=DELFWT PHI=PHDELFWT\n')
        file.write('END\n')
        file.close()

        runfft = 'fft HKLIN ' + filename_mtz + ' MAPOUT mi_1ff.map < mi_fft.inp > mi_fft.log'
        os.system(runfft)

        fileexists = os.path.exists('mi_1ff.map')
        if fileexists == 0:
            print 'FFT for density test failed'
            time.sleep(4)
            return 1
        else:
            os.remove('mi_fft.inp')
            os.remove('mi_fft.log')

        # Build density around protein
        file = open('mi_mapmask.inp', 'w')
        file.write('EXTEND XTAL\n')
        file.write('BORDER 2.0\n')
        file.write('END\n')
        file.close()

        runmapmask = 'mapmask MAPIN mi_1ff.map XYZIN ' + filename_pdb + ' MAPOUT mi_1ff_masked.map < mi_mapmask.inp > mi_mapmask.log'
        os.system(runmapmask)

        fileexists = os.path.exists('mi_1ff_masked.map')
        if fileexists == 0:
            print 'MAPMASK for density test failed'
            return 1
        else:
            os.remove('mi_mapmask.inp')
            os.remove('mi_mapmask.log')
            os.remove('mi_1ff.map')

        # Peak/hole pick near protein
        file = open('mi_peakmax.inp', 'w')
        file.write('THRESHOLD RMS 4.0 NEGATIVE\n')
        file.write('END\n')
        file.close()

        runpeakmax = 'peakmax MAPIN mi_1ff_masked.map XYZOUT mi_peakmax.pdb < mi_peakmax.inp > mi_peakmax.log 2> mi_peakmax_err.log'
        os.system(runpeakmax)

        fileexists = os.path.exists('mi_peakmax_err.log')
        if fileexists != 0:
            os.remove('mi_peakmax_err.log')

        fileexists = os.path.exists('mi_peakmax.log')
        if fileexists == 0:
            print 'PEAKMAX for density test failed'
            time.sleep(4)
            return 1
        else:
            os.remove('mi_peakmax.inp')
            os.remove('mi_1ff_masked.map')
            os.remove('mi_peakmax.log')

        # Identify amino acids within 2.0A of any 4 sigma peaks/holes
        fileexists = os.path.exists('mi_peakmax.pdb')
        if fileexists != 0:
            file = open('mi_peakmax.pdb', 'r')
            allLines = file.readlines()
            file.close()
            os.remove('mi_peakmax.pdb')

            for eachLine in allLines:
                tag = eachLine[0:6]
                tag = tag.strip()
                if tag == 'ATOM' or tag == 'HETATM':
                    x = eachLine[30:38]
                    y = eachLine[38:46]
                    z = eachLine[46:54]
                    x = float(x)
                    y = float(y)
                    z = float(z)
                    aList_peak_x.append(x)
                    aList_peak_y.append(y)
                    aList_peak_z.append(z)

            number_peaks = len(aList_peak_x)

            file = open(filename_pdb, 'r')
            allLines = file.readlines()
            file.close()

            count = 0
            while count < number_peaks:
                xp = aList_peak_x[count]
                yp = aList_peak_y[count]
                zp = aList_peak_z[count]

                for eachLine in allLines:
                    tag = eachLine[0:6]
                    tag = tag.strip()
                    if tag == 'ATOM' or tag == 'HETATM':
                        x = eachLine[30:38]
                        y = eachLine[38:46]
                        z = eachLine[46:54]
                        x = float(x)
                        y = float(y)
                        z = float(z)

                        # Squared distance: 4.0 corresponds to a 2.0 Angstrom cutoff
                        dist = (xp - x) ** 2 + (yp - y) ** 2 + (zp - z) ** 2
                        if dist < 4.0:
                            chain = eachLine[21:22]
                            res_number = eachLine[22:26]
                            res_number = res_number.strip()
                            res_name = eachLine[17:20]
                            res_name = res_name.strip()
                            aList_density_chain.append(chain)
                            aList_density_resno.append(res_number)
                            aList_density_resname.append(res_name)

                count = count + 1

        ###################################################################
        # Build tidy error lists by combining error types for each entity #
        ###################################################################
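        # Each flagged residue collects one-letter codes from the individual
        # checks - (G)eometry, (V)an der Waals, (O)mega, (P)hi-psi, (C)is
        # peptide, (R)otamer chi-1 and (D)ensity - matching the legend
        # written to the error file below.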
        entity_count = len(aList_chain_store)
        bond_count = len(aList_bonds_chain)
        angles_count = len(aList_angles_chain)
        chiral_count = len(aList_chiral_chain)
        contacts_count = len(aList_contacts_chain)
        cis_count = len(aList_cis_chain)
        rotamer_count = len(aList_rotamer_chain)
        omega_count = len(aList_omega_chain)
        rama_count = len(aList_rama_chain)
        density_count = len(aList_density_chain)

        count = 0
        while count < entity_count:

            geom_error = '.'
            contacts_error = '.'
            omega_error = '.'
            phipsi_error = '.'
            rotamer_error = '.'
            cis_error = '.'
            density_error = '.'
            error_flag = 'no'

            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]
            res_name_store = aList_res_name_store[count]

            # bonds
            count1 = 0
            while count1 < bond_count:
                chain = aList_bonds_chain[count1]
                res_number = aList_bonds_resno[count1]
                res_name = aList_bonds_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    geom_error = 'G'
                    error_flag = 'yes'
                count1 = count1 + 1

            # angles
            count1 = 0
            while count1 < angles_count:
                chain = aList_angles_chain[count1]
                res_number = aList_angles_resno[count1]
                res_name = aList_angles_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    geom_error = 'G'
                    error_flag = 'yes'
                count1 = count1 + 1

            # contacts
            count1 = 0
            while count1 < contacts_count:
                chain = aList_contacts_chain[count1]
                res_number = aList_contacts_resno[count1]
                res_name = aList_contacts_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    contacts_error = 'V'
                    error_flag = 'yes'
                count1 = count1 + 1

            # chiral
            count1 = 0
            while count1 < chiral_count:
                chain = aList_chiral_chain[count1]
                res_number = aList_chiral_resno[count1]
                res_name = aList_chiral_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    geom_error = 'G'
                    error_flag = 'yes'
                count1 = count1 + 1

            # cis peptide
            count1 = 0
            while count1 < cis_count:
                chain = aList_cis_chain[count1]
                res_number = aList_cis_resno[count1]
                res_name = aList_cis_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    cis_error = 'C'
                    error_flag = 'yes'
                count1 = count1 + 1

            # rotamer
            count1 = 0
            while count1 < rotamer_count:
                chain = aList_rotamer_chain[count1]
                res_number = aList_rotamer_resno[count1]
                res_name = aList_rotamer_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    rotamer_error = 'R'
                    error_flag = 'yes'
                count1 = count1 + 1

            # omega angles
            count1 = 0
            while count1 < omega_count:
                chain = aList_omega_chain[count1]
                res_number = aList_omega_resno[count1]
                res_name = aList_omega_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    omega_error = 'O'
                    error_flag = 'yes'
                count1 = count1 + 1

            # phi-psi
            count1 = 0
            while count1 < rama_count:
                chain = aList_rama_chain[count1]
                res_number = aList_rama_resno[count1]
                res_name = aList_rama_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    phipsi_error = 'P'
                    error_flag = 'yes'
                count1 = count1 + 1

            # Density
            count1 = 0
            while count1 < density_count:
                chain = aList_density_chain[count1]
                res_number = aList_density_resno[count1]
                res_name = aList_density_resname[count1]
                if chain == chain_store and res_number == res_number_store:
                    density_error = 'D'
                    error_flag = 'yes'
                count1 = count1 + 1

            # Write all error types for this residue
            if error_flag == 'yes':

                # count phi-psi and sidechain errors
                if phipsi_error == 'P':
                    count_phipsi = count_phipsi + 1.0

                if rotamer_error == 'R':
                    count_rotamer = count_rotamer + 1.0

                # Tidy output
                res_number_field = len(res_number_store)
                print_res_number = res_number_store

                if res_number_field == 1:
                    print_res_number = '   ' + res_number_store
                if res_number_field == 2:
                    print_res_number = '  ' + res_number_store
                if res_number_field == 3:
                    print_res_number = ' ' + res_number_store

                aLine = ' ' + chain_store + ' ' + print_res_number + ' ' + res_name_store + ' ' + geom_error + ' ' \
                        + contacts_error + ' ' + omega_error + ' ' + phipsi_error + ' ' + cis_error + ' ' \
                        + rotamer_error + ' ' + density_error
                aList_errors.append(aLine)

            count = count + 1

        # Local error counts
        percent_phi_psi = 100.0 * count_phipsi / amino_acid_count
        percent_phi_psi = round(percent_phi_psi, 2)
        percent_phi_psi = str(percent_phi_psi)

        percent_rotamer = 100.0 * count_rotamer / amino_acid_count
        percent_rotamer = round(percent_rotamer, 2)
        percent_rotamer = str(percent_rotamer)

        # Write error list
        number_errors = len(aList_errors)
        percent_errors = 100.0 * float(number_errors) / entity_count
        percent_errors = round(percent_errors, 1)
        percent_errors = str(percent_errors)

        print 'Output putative error list:', errorfile
        print 'Percentage of residues in error list:', percent_errors

        file = open(errorfile, 'w')
        file.write('#\n')
        file.write('# Working directory: ')
        file.write(workingdir)
        file.write('\n# Coordinates: ')
        file.write(filename_pdb)
        file.write('\n# Data: ')
        file.write(filename_mtz)
        file.write('\n#\n')
        file.write('# Rwork: ')
        file.write(rwork)
        file.write('\n# Rfree: ')
        file.write(rfree)
        file.write('\n# Percentage of residues outside Richardson phi-psi core: ')
        file.write(percent_phi_psi)
        file.write('\n# Percentage of residues with abnormal rotamers: ')
        file.write(percent_rotamer)
        file.write('\n# Percentage of residues flagged: ')
        file.write(percent_errors)
        file.write('\n#\n')
        file.write('# Residue list codes for severe abnormality types:\n')
        file.write('# (G)eometry, (V)an der Waals, (O)mega, (P)hi-psi, (C)is peptide,\n')
        file.write('# (R)otamer chi-1, (D)ensity\n')
        file.write('#\n')

        count = 0
        while count < number_errors:
            aLine = aList_errors[count]
            file.write(aLine)
            file.write('\n')
            count = count + 1

        file.write('#\n')
        file.close()

        ####################################################################
        # Establish diagnostic records in PDB REMARK 465, 470, 500 format  #
        ####################################################################

        # Determination of missing amino acids from SEQRES records if there were any
        number_sequence = len(aList_sequence_resname)
        number_chains = len(aList_sequence_chain_id)

        print '\nNumber amino acids in SEQRES:', number_sequence, 'over', number_chains, 'chains\n'

        if number_sequence > 0:

            # Load data for a particular chain
            count_chains = 0
            while count_chains < number_chains:

                # initialize lists that will be used for this sequence/structure comparison
                aList_sequence_resname_temp = []
                aList_sequence_resnumber_temp = []
                aList_structure_resname_temp = []
                aList_structure_resnumber_temp = []

                working_seq_chain = aList_sequence_chain_id[count_chains]
                sequence_match = 'no'

                # Load sequence data for the current chain
                count1 = 0
                while count1 < number_sequence:
                    seq_chain = aList_sequence_chain[count1]
                    if working_seq_chain == seq_chain:
                        resname = aList_sequence_resname[count1]
                        aList_sequence_resname_temp.append(resname)
                        aList_sequence_resnumber_temp.append('?')
                    count1 = count1 + 1

                # Load structure data for the current chain
                count1 = 0
                while count1 < entity_count:
                    structure_chain = aList_chain_store[count1]
                    if working_seq_chain == structure_chain:
                        resname = aList_res_name_store[count1]
                        resnumber = aList_res_number_store[count1]
                        aList_structure_resname_temp.append(resname)
                        aList_structure_resnumber_temp.append(resnumber)
                    count1 = count1 + 1
                # Algorithm for establishing numbering in sequence-structure comparison:
                # match to leading pentamer in this structure along sequence
                number_structure_temp = len(aList_structure_resname_temp)
                number_sequence_temp = len(aList_sequence_resname_temp)
                number_sequence_search = number_sequence_temp - 6

                test_structure_resname_1 = aList_structure_resname_temp[0]
                test_structure_resname_2 = aList_structure_resname_temp[1]
                test_structure_resname_3 = aList_structure_resname_temp[2]
                test_structure_resname_4 = aList_structure_resname_temp[3]
                test_structure_resname_5 = aList_structure_resname_temp[4]

                structure_resnumber_start = aList_structure_resnumber_temp[0]
                structure_resnumber_start = int(structure_resnumber_start)

                count1 = 0
                while count1 < number_sequence_search:
                    count2 = count1 + 1
                    count3 = count1 + 2
                    count4 = count1 + 3
                    count5 = count1 + 4

                    test_sequence_resname_1 = aList_sequence_resname_temp[count1]
                    test_sequence_resname_2 = aList_sequence_resname_temp[count2]
                    test_sequence_resname_3 = aList_sequence_resname_temp[count3]
                    test_sequence_resname_4 = aList_sequence_resname_temp[count4]
                    test_sequence_resname_5 = aList_sequence_resname_temp[count5]

                    if test_structure_resname_1 == test_sequence_resname_1:
                        if test_structure_resname_2 == test_sequence_resname_2:
                            if test_structure_resname_3 == test_sequence_resname_3:
                                if test_structure_resname_4 == test_sequence_resname_4:
                                    if test_structure_resname_5 == test_sequence_resname_5:
                                        sequence_resnumber_start = structure_resnumber_start - count1
                                        count1 = number_sequence_search
                                        sequence_match = 'yes'

                    count1 = count1 + 1

                # Now setup sequence numbering list
                if sequence_match == 'yes':
                    count1 = 0
                    while count1 < number_sequence_temp:
                        sequence_resnumber_put = sequence_resnumber_start + count1
                        sequence_resnumber_put = str(sequence_resnumber_put)
                        aList_sequence_resnumber_temp[count1] = sequence_resnumber_put
                        count1 = count1 + 1

                    # Now analyse and catch missing residues
                    count1 = 0
                    while count1 < number_sequence_temp:
                        sequence_resname_put = aList_sequence_resname_temp[count1]
                        sequence_resnumber_put = aList_sequence_resnumber_temp[count1]

                        count2 = 0
                        while count2 < number_structure_temp:
                            structure_resnumber_put = aList_structure_resnumber_temp[count2]
                            find_error = 'yes'
                            if sequence_resnumber_put == structure_resnumber_put:
                                find_error = 'no'
                                count2 = number_structure_temp
                            count2 = count2 + 1

                        if find_error == 'yes':
                            out_line = 'REMARK 465 1 ' + sequence_resname_put + ' ' + working_seq_chain + ' ' + sequence_resnumber_put
                            aList_missing_residues.append(out_line)

                        count1 = count1 + 1

                # End of loop over chains
                count_chains = count_chains + 1

        # Write missing residues
        number_missing_residues = len(aList_missing_residues)
        if number_missing_residues > 0:
            pdb_annotate.append('REMARK 465')
            pdb_annotate.append('REMARK 465 MISSING RESIDUES')
            pdb_annotate.append('REMARK 465 THE FOLLOWING RESIDUES WERE NOT LOCATED IN THE')
            pdb_annotate.append('REMARK 465 EXPERIMENT. (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
            pdb_annotate.append('REMARK 465 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
            pdb_annotate.append('REMARK 465')
            pdb_annotate.append('REMARK 465 M RES C SSSEQI')

            count1 = 0
            while count1 < number_missing_residues:
                out_line = aList_missing_residues[count1]
                pdb_annotate.append(out_line)
                count1 = count1 + 1

        ################################################################
        # Obtain a list of missing atoms in each residue (REMARK 470)  #
        ################################################################

        pdb_annotate.append('REMARK 470')
        pdb_annotate.append('REMARK 470 MISSING ATOM')
        pdb_annotate.append('REMARK 470 THE FOLLOWING RESIDUES HAVE MISSING ATOMS (M=MODEL NUMBER;')
        pdb_annotate.append('REMARK 470 RES=RESIDUE NAME; C=CHAIN IDENTIFIER; SSEQ=SEQUENCE NUMBER;')
        pdb_annotate.append('REMARK 470 I=INSERTION CODE):')
        pdb_annotate.append('REMARK 470 M RES CSSEQI ATOMS')

        number_allatoms = len(aList_allatoms_chain)

        count1 = 0
        while count1 < entity_count:
            chain_store = aList_chain_store[count1]
            res_number_store = aList_res_number_store[count1]
            res_name_store = aList_res_name_store[count1]

            # Get all atoms for this residue
            aList_current_residue_atoms = []

            count = 0
            while count < number_allatoms:
                chain_all = aList_allatoms_chain[count]
                res_number_all = aList_allatoms_res_number[count]
                res_name_all = aList_allatoms_res_name[count]
                atom_name_all = aList_allatoms_atom_name[count]
                if chain_store == chain_all and res_number_store == res_number_all:
                    aList_current_residue_atoms.append(atom_name_all)
                count = count + 1

            # Process to find missing atoms in this residue
            number_residue_atoms = len(aList_current_residue_atoms)

            aList_atoms_expected = []
            if res_name_store == 'GLY':
                aList_atoms_expected = aList_GLY_atoms
            if res_name_store == 'ALA':
                aList_atoms_expected = aList_ALA_atoms
            if res_name_store == 'VAL':
                aList_atoms_expected = aList_VAL_atoms
            if res_name_store == 'ILE':
                aList_atoms_expected = aList_ILE_atoms
            if res_name_store == 'LEU':
                aList_atoms_expected = aList_LEU_atoms
            if res_name_store == 'PHE':
                aList_atoms_expected = aList_PHE_atoms
            if res_name_store == 'PRO':
                aList_atoms_expected = aList_PRO_atoms
            if res_name_store == 'MET':
                aList_atoms_expected = aList_MET_atoms
            if res_name_store == 'TRP':
                aList_atoms_expected = aList_TRP_atoms
            if res_name_store == 'CYS':
                aList_atoms_expected = aList_CYS_atoms
            if res_name_store == 'SER':
                aList_atoms_expected = aList_SER_atoms
            if res_name_store == 'THR':
                aList_atoms_expected = aList_THR_atoms
            if res_name_store == 'ASN':
                aList_atoms_expected = aList_ASN_atoms
            if res_name_store == 'GLN':
                aList_atoms_expected = aList_GLN_atoms
            if res_name_store == 'TYR':
                aList_atoms_expected = aList_TYR_atoms
            if res_name_store == 'HIS':
                aList_atoms_expected = aList_HIS_atoms
            if res_name_store == 'ASP':
                aList_atoms_expected = aList_ASP_atoms
            if res_name_store == 'GLU':
                aList_atoms_expected = aList_GLU_atoms
            if res_name_store == 'LYS':
                aList_atoms_expected = aList_LYS_atoms
            if res_name_store == 'ARG':
                aList_atoms_expected = aList_ARG_atoms

            number_atoms_expected = len(aList_atoms_expected)

            if number_atoms_expected > 0:

                # Check each expected atomname to see if it is found
                aList_atoms_expected_flag = []

                count2 = 0
                while count2 < number_atoms_expected:
                    atom_name_expected = aList_atoms_expected[count2]
                    found = 'no'

                    count_current_atoms = 0
                    while count_current_atoms < number_residue_atoms:
                        atom_name = aList_current_residue_atoms[count_current_atoms]
                        if atom_name_expected == atom_name:
                            found = 'yes'
                        count_current_atoms = count_current_atoms + 1
                    if found == 'yes':
                        aList_atoms_expected_flag.append('yes')
                    else:
                        aList_atoms_expected_flag.append('no')

                    count2 = count2 + 1

                # Collect missing atoms for this residue into a list and create formatted REMARK 470
                write_flag = 'no'
                out_list = ''

                count2 = 0
                while count2 < number_atoms_expected:
                    found = aList_atoms_expected_flag[count2]
                    if found == 'no':
                        atom_name = aList_atoms_expected[count2]
                        number_chars = len(atom_name)
                        if number_chars == 1:
                            atom_name = atom_name + '  '
                        if number_chars == 2:
                            atom_name = atom_name + ' '
                        out_list = out_list + ' ' + atom_name
                        write_flag = 'yes'
                    count2 = count2 + 1

                if write_flag == 'yes':
                    str_res_number_store = str(res_number_store)
                    number_chars = len(str_res_number_store)
                    if number_chars == 1:
                        str_res_number_store = str_res_number_store + '   '
                    if number_chars == 2:
                        str_res_number_store = str_res_number_store + '  '
                    if number_chars == 3:
                        str_res_number_store = str_res_number_store + ' '

                    out_line = 'REMARK 470 1 ' + res_name_store + ' ' + chain_store + ' ' + str_res_number_store + ' ' + out_list
                    pdb_annotate.append(out_line)

            # End of loop over entities
            count1 = count1 + 1

        ################################################################################
        # MI stereochemistry subtopic to identify discrete disorder and errors (REMARK 500)
        ################################################################################

        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY')
        pdb_annotate.append('REMARK 500 SUBTOPIC: DISCRETE DISORDER')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 RESIDUES IN MULTIPLE CONFORMATIONS')
        pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES WERE DESCRIBED BY MULTIPLE')
        pdb_annotate.append('REMARK 500 CONFORMATIONS.(M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 M RES C SSSEQI')
        pdb_annotate.append('REMARK 500')

        # Reduce disorder atom list to residue list
        disorder_count = len(aList_disorder_chain)

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < disorder_count:
                chain = aList_disorder_chain[count1]
                res_number = aList_disorder_resno[count1]
                res_name = aList_disorder_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 500 THERE WERE NO RESIDUES IN MULTIPLE CONFORMATIONS')

        # Covalent bond lengths
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY')
        pdb_annotate.append('REMARK 500 SUBTOPIC: COVALENT BOND LENGTHS (MI ERROR LIST)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL BOND LENGTHS')
        pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 M RES C SSSEQI')
        pdb_annotate.append('REMARK 500')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < bond_count:
                chain = aList_bonds_chain[count1]
                res_number = aList_bonds_resno[count1]
                res_name = aList_bonds_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # Covalent bond angles
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY')
        pdb_annotate.append('REMARK 500 SUBTOPIC: COVALENT BOND ANGLES (MI ERROR LIST)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL BOND ANGLES')
        pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 M RES C SSSEQI')
        pdb_annotate.append('REMARK 500')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < angles_count:
                chain = aList_angles_chain[count1]
                res_number = aList_angles_resno[count1]
                res_name = aList_angles_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # Chiral centers
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY')
        pdb_annotate.append('REMARK 500 SUBTOPIC: CHIRAL CENTERS (MI ERROR LIST)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL CHIRAL CENTERS')
        pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 M RES C SSSEQI')
        pdb_annotate.append('REMARK 500')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < chiral_count:
                chain = aList_chiral_chain[count1]
                res_number = aList_chiral_resno[count1]
                res_name = aList_chiral_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # Abnormal omega
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY')
        pdb_annotate.append('REMARK 500 SUBTOPIC: NON-CIS, NON-TRANS (MI ERROR LIST)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL OMEGA ANGLES')
        pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 M RES C SSSEQI')
        pdb_annotate.append('REMARK 500')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < omega_count:
                chain = aList_omega_chain[count1]
                res_number = aList_omega_resno[count1]
                res_name = aList_omega_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # Close contacts (note - all)
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY')
        pdb_annotate.append('REMARK 500 SUBTOPIC: CLOSE CONTACTS (MI ERROR LIST)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL CONTACT DISTANCES')
        pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 M RES C SSSEQI')
        pdb_annotate.append('REMARK 500')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < contacts_count:
                chain = aList_contacts_chain[count1]
                res_number = aList_contacts_resno[count1]
                res_name = aList_contacts_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # PHI-PSI data
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 GEOMETRY AND STEREOCHEMISTRY')
        pdb_annotate.append('REMARK 500 SUBTOPIC: TORSION ANGLES (MI ERROR LIST)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 THE FOLLOWING RESIDUES CONTAINED ABNORMAL PHI-PSI ANGLES')
        pdb_annotate.append('REMARK 500 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 500 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 500')
        pdb_annotate.append('REMARK 500 M RES C SSSEQI')
        pdb_annotate.append('REMARK 500')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < rama_count:
                chain = aList_rama_chain[count1]
                res_number = aList_rama_resno[count1]
                res_name = aList_rama_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 500 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 500 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # invented MI remark to identify density issues
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 OTHER VALIDATION')
        pdb_annotate.append('REMARK 501 SUBTOPIC: ELECTRON DENSITY (MI ERROR LIST)')
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 THE FOLLOWING RESIDUES ARE NEAR DENSITY DIFFERENCE FEATURES')
        pdb_annotate.append('REMARK 501 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 501 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 M RES C SSSEQI')
        pdb_annotate.append('REMARK 501')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < density_count:
                chain = aList_density_chain[count1]
                res_number = aList_density_resno[count1]
                res_name = aList_density_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 501 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 501 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # invented MI remark to identify cis-pep
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 OTHER VALIDATION')
        pdb_annotate.append('REMARK 501 SUBTOPIC: CIS PEPTIDE (MI ERROR LIST)')
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 THE FOLLOWING RESIDUES HAVE CIS PEPTIDE BONDS')
        pdb_annotate.append('REMARK 501 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 501 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 M RES C SSSEQI')
        pdb_annotate.append('REMARK 501')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < cis_count:
                chain = aList_cis_chain[count1]
                res_number = aList_cis_resno[count1]
                res_name = aList_cis_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 501 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 501 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        # invented MI remark to identify rotamer (chi-1)
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 OTHER VALIDATION')
        pdb_annotate.append('REMARK 501 SUBTOPIC: ROTAMER (MI ERROR LIST)')
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 THE FOLLOWING RESIDUES HAVE CHI-1 ANGLES WHICH DEVIATE MORE')
        pdb_annotate.append('REMARK 501 THAN 45 DEGREES FROM A KNOWN ROTAMER')
        pdb_annotate.append('REMARK 501 (M=MODEL NUMBER; RES=RESIDUE NAME; C=CHAIN')
        pdb_annotate.append('REMARK 501 IDENTIFIER; SSSEQ=SEQUENCE NUMBER; I=INSERTION CODE.)')
        pdb_annotate.append('REMARK 501')
        pdb_annotate.append('REMARK 501 M RES C SSSEQI')
        pdb_annotate.append('REMARK 501')

        write_error = 'no'
        count = 0
        while count < entity_count:
            chain_store = aList_chain_store[count]
            res_number_store = aList_res_number_store[count]

            count1 = 0
            found_error = 'no'
            while count1 < rotamer_count:
                chain = aList_rotamer_chain[count1]
                res_number = aList_rotamer_resno[count1]
                res_name = aList_rotamer_resname[count1]
                if chain == chain_store and res_number == res_number_store and found_error == 'no':
                    out_line = 'REMARK 501 1 ' + res_name + ' ' + chain + ' ' + res_number
                    pdb_annotate.append(out_line)
                    found_error = 'yes'
                    write_error = 'yes'
                count1 = count1 + 1

            count = count + 1

        if write_error == 'no':
            pdb_annotate.append('REMARK 501 THERE WERE NO SEVERE ABNORMALITIES IN THIS CATEGORY')

        ################################
        # Insert annotation into PDB   #
        ################################

        num_lines = len(pdb_annotate)
        num_SEQRES = len(aList_SEQRES)

        file = open(filename_pdb_full, 'r')
        allLines = file.readlines()
        file.close()

        os.remove(filename_pdb_full)

        file = open(filename_pdb_full, 'w')

        tag_prev = '?'
        for eachLine in allLines:
            tag = eachLine[0:6]
            tag = tag.strip()

            # Write annotation and SEQRES records at the end of the REMARK block
            if tag != 'REMARK' and tag_prev == 'REMARK':
                count = 0
                while count < num_lines:
                    out_line = pdb_annotate[count]
                    file.write(out_line)
                    file.write('\n')
                    count = count + 1

                count = 0
                while count < num_SEQRES:
                    out_line = aList_SEQRES[count]
                    file.write(out_line)
                    file.write('\n')
                    count = count + 1

            file.write(eachLine)
            tag_prev = tag

        file.close()

    else:
        print 'Structure checking only enabled for REFMAC5 refinement'

        file = open(errorfile, 'w')
        file.write('#\n')
        file.write('# Generation of error lists requires REFMAC5 refinement\n')
        file.write('#\n')
        file.close()

    #############################################################
    # Standard file for anomalous difference map when available #
    #############################################################

    if anomlabel != 'none' and siganomlabel != 'none' and ref_engine == 'refmac5':

        fileexists = os.path.exists('anom_diffmap.map')
        if fileexists != 0:
            os.remove('anom_diffmap.map')

        fileexists = os.path.exists('mi_anommap_out.mtz')
        if fileexists != 0:
            os.remove('mi_anommap_out.mtz')

        # Combine anomalous coefficients with refined phases back onto the refined mtz
        file = open('mi_cad.inp', 'w')
        file.write('LABIN FILE_NUMBER 1 ALL\n')
        file.write('LABIN FILE_NUMBER 2 ALL\n')
        file.write('END\n')
        file.close()

        runcad = 'cad HKLIN1 ' + filename_mtz + ' HKLIN2 mi_anommap.mtz HKLOUT mi_anommap_out.mtz < mi_cad.inp > mi_cad.log'
        os.system(runcad)

        fileexists = os.path.exists('mi_anommap_out.mtz')
        if fileexists != 0:
            os.remove('mi_cad.log')
            os.remove('mi_cad.inp')
            os.remove('mi_anommap.mtz')
        else:
            print 'The CAD run to reattach anomalous difference data seems to have failed'
            time.sleep(4)
            return 1

        # Use special CCP4/FFT condition that rotates phases for anomalous difference maps
        file = open('mi_fft.inp', 'w')
        file.write('LABIN DANO=')
        file.write(anomlabel)
        file.write(' PHI=PHIC\n')
        file.write('END\n')
        file.close()

        runfft = 'fft HKLIN mi_anommap_out.mtz MAPOUT mi_1ff.map < mi_fft.inp 1> mi_fft.log 2> mi_fft_err.log'
        os.system(runfft)

        os.remove('mi_fft.inp')

        fileexists = os.path.exists('mi_fft.log')
        if fileexists != 0:
            os.remove('mi_fft.log')

        fileexists = os.path.exists('mi_fft_err.log')
        if fileexists != 0:
            os.remove('mi_fft_err.log')

        # Note that FFT may fail if anom columns are present but unfilled so not a stop
        fileexists = os.path.exists('mi_1ff.map')
        if fileexists != 0:

            print 'Creating anomalous difference map file: anom_diffmap.map'

            # Build cell around the protein in the anomalous difference map with CCP4/MAPMASK
            file = open('mi_mapmask.inp', 'w')
            file.write('BORDER 5.0\n')
            file.write('EXTEND XTAL\n')
            file.write('END\n')
            file.close()

            runmapmask = 'mapmask XYZIN ' + filename_pdb + ' MAPIN mi_1ff.map MAPOUT anom_diffmap.map < mi_mapmask.inp > mi_mapmask.log'
            os.system(runmapmask)

            fileexists = os.path.exists('anom_diffmap.map')
            if fileexists == 0:
                print 'MAPMASK for anomalous difference map failed'
                time.sleep(4)
                return 1
            else:
                os.remove('mi_mapmask.inp')
                os.remove('mi_mapmask.log')
                os.remove('mi_1ff.map')

            # Rename mtz carrying anomalous data to standard refinement output
            os.remove(filename_mtz)
            os.rename('mi_anommap_out.mtz', filename_mtz)

            filename_anom_full = os.path.join(workingdir, 'anom_diffmap.map')

    ######################
    # Append project log #
    ######################

    print 'Writing project log'

    runtime = time.ctime(time.time())

    file = open(projectlog, 'a')
    file.seek(0, 2)

    file.write('Job ID: ')
    file.write(job_id)
    file.write('\nDate: ')
    file.write(runtime)
    file.write('\nInput atoms: ')
    file.write(pdbfile)
    file.write('\nInput data: ')
    file.write(mtzfile)
    file.write('\nInput library: ')
    file.write(libfile)

    if ref_engine == 'rigid':
        file.write('\nOutput atoms: ')
        file.write(filename_pdb_full)
        file.write('\nOutput phased data: ')
        file.write(filename_mtz_full)
        file.write('\nOutput log: ')
        file.write(filename_log_full)
        file.write('\nOutput CIF log: ')
        file.write(filename_refmac_full)
        file.write('\nOptions: none\n')
        file.write('Summary: REFMAC5 rigid-body Rwork=')
        file.write(rwork)
        file.write(' Rfree=')
        file.write(rfree)
        file.write(' Resolution=')
        file.write(resolution_output)

    if ref_engine == 'refmac5':
        file.write('\nOutput atoms: ')
        file.write(filename_pdb_full)
        file.write('\nOutput phased data: ')
        file.write(filename_mtz_full)
        file.write('\nOutput log: ')
        file.write(filename_log_full)
        file.write('\nOutput CIF log: ')
        file.write(filename_refmac_full)
        file.write('\nOutput error list: ')
        file.write(filename_errors_full)
        file.write('\nOutput anomalous difference map: ')
        file.write(filename_anom_full)

        if water_pick == 'yes':
            file.write('\nOptions: water-pick\n')
        else:
            file.write('\nOptions: none\n')

        file.write('Summary: REFMAC5 Rwork=')
        file.write(rwork)
        file.write(' Rfree=')
        file.write(rfree)
        file.write(' RMSD(bonds)=')
        file.write(rmsd_bonds)
        file.write(' Resolution=')
        file.write(resolution_output)

    if ref_engine == 'shelx':
        file.write('\nOutput pdb file: ')
        file.write(filename_pdb_full)
        file.write('\nOutput precomputed map data file: ')
        file.write(filename_mtz_full)
        file.write('\nOutput log file: ')
        file.write(filename_log_full)
        file.write('\nOutput lst file: ')
        file.write(filename_lst_full)
        file.write('\nOptions: none\n')
        file.write('Summary: SHELXH Rwork=')
        file.write(rwork)
        file.write(' Resolution=')
        file.write(resolution_output)

    if ref_engine == 'refmac5' or ref_engine == 'shelx':
        file.write('\nParameters: Weight=')
        file.write(weight)
        file.write(' Cycles=')
        file.write(cycles)
        file.write(' Bfactor=')
        file.write(bref_type)
        file.write(' TLS_input_file=')
        file.write(tlsfile)

    file.write('\n---------------\n')
    file.close()

    # Clean-up job-specific temporary CCP4_SCR space
    fileexists = os.path.exists(temp_lib)
    if fileexists != 0:
        os.remove(temp_lib)

    fileexists = os.path.exists(working_ccp4_scratch)
    if fileexists != 0:
        dir_list = os.listdir(working_ccp4_scratch)
        number_files = len(dir_list)

        count = 0
        while count < number_files:
            target_file = dir_list[count]
            target_file_full_path = os.path.join(working_ccp4_scratch, target_file)
            os.remove(target_file_full_path)
            count = count + 1

        os.rmdir(working_ccp4_scratch)

    time.sleep(4)

    #
    return 0

if __name__ == "__main__":
    sys.exit(Run())
mifit/miexpert
mi_refine.py
Python
gpl-3.0
134553
[ "CRYSTAL" ]
1f7066e547411f38e30c4a946971b6e224859486c6e9e110c8c7debc6b66b0a0
# Copyright (C) 2018, Bastian Eicher
# See the README file for details, or visit http://0install.net.

import os
from os.path import join
from xml.dom import minidom, Node

from repo import cmd, registry, merge, incoming, formatting
from repo.cmd import update


def handle(args):
    if not cmd.find_config(missing_ok = True):
        from_registry = registry.lookup(args.uri)
        assert from_registry['type'] == 'local', 'Unsupported registry type in %s' % from_registry
        os.chdir(from_registry['path'])
    config = cmd.load_config()

    rel_uri = args.uri[len(config.REPOSITORY_BASE_URL):]
    feed_path = join('feeds', config.get_feeds_rel_path(rel_uri))
    with open(feed_path, 'rb') as stream:
        doc = minidom.parse(stream)

    messages = []
    for impl in merge.find_impls(doc.documentElement):
        impl_id = impl.getAttribute("id")
        impl_version = impl.getAttribute("version")
        impl_stability = impl.getAttribute("stability")
        if impl_id == args.id or impl_version == args.id:
            if args.stability and impl_stability != args.stability:
                messages.append('Implementation {id} (version {version}) stability set to {stability}'.format(
                    id = impl_id, version = impl_version, stability = args.stability))
                impl.setAttribute("stability", args.stability)

    if len(messages) > 0:
        commit_msg = 'Modified {uri}\n\n{messages}'.format(uri = args.uri, messages = '\n'.join(messages))
        new_xml = formatting.format_doc(doc)
        incoming.write_to_git(feed_path, new_xml, commit_msg, config)
        update.do_update(config)
    else:
        print("No changes made.")
0install/0repo
repo/cmd/modify.py
Python
lgpl-2.1
1,536
[ "VisIt" ]
a79f9739a72759998a423ccbac952a88af00049bfea8e3107f6bffc54fdcf646
""" Acceptance tests for Content Libraries in Studio """ from ddt import ddt, data from nose.plugins.attrib import attr from flaky import flaky from .base_studio_test import StudioLibraryTest from ...fixtures.course import XBlockFixtureDesc from ...pages.studio.auto_auth import AutoAuthPage from ...pages.studio.utils import add_component from ...pages.studio.library import LibraryEditPage from ...pages.studio.users import LibraryUsersPage @attr('shard_2') @ddt class LibraryEditPageTest(StudioLibraryTest): """ Test the functionality of the library edit page. """ def setUp(self): # pylint: disable=arguments-differ """ Ensure a library exists and navigate to the library edit page. """ super(LibraryEditPageTest, self).setUp() self.lib_page = LibraryEditPage(self.browser, self.library_key) self.lib_page.visit() self.lib_page.wait_until_ready() def test_page_header(self): """ Scenario: Ensure that the library's name is displayed in the header and title. Given I have a library in Studio And I navigate to Library Page in Studio Then I can see library name in page header title And I can see library name in browser page title """ self.assertIn(self.library_info['display_name'], self.lib_page.get_header_title()) self.assertIn(self.library_info['display_name'], self.browser.title) def test_add_duplicate_delete_actions(self): """ Scenario: Ensure that we can add an HTML block, duplicate it, then delete the original. Given I have a library in Studio with no XBlocks And I navigate to Library Page in Studio Then there are no XBlocks displayed When I add Text XBlock Then one XBlock is displayed When I duplicate first XBlock Then two XBlocks are displayed And those XBlocks locators' are different When I delete first XBlock Then one XBlock is displayed And displayed XBlock are second one """ self.assertEqual(len(self.lib_page.xblocks), 0) # Create a new block: add_component(self.lib_page, "html", "Text") self.assertEqual(len(self.lib_page.xblocks), 1) first_block_id = self.lib_page.xblocks[0].locator # Duplicate the block: self.lib_page.click_duplicate_button(first_block_id) self.assertEqual(len(self.lib_page.xblocks), 2) second_block_id = self.lib_page.xblocks[1].locator self.assertNotEqual(first_block_id, second_block_id) # Delete the first block: self.lib_page.click_delete_button(first_block_id, confirm=True) self.assertEqual(len(self.lib_page.xblocks), 1) self.assertEqual(self.lib_page.xblocks[0].locator, second_block_id) def test_no_edit_visibility_button(self): """ Scenario: Ensure that library xblocks do not have 'edit visibility' buttons. Given I have a library in Studio with no XBlocks And I navigate to Library Page in Studio When I add Text XBlock Then one XBlock is displayed And no 'edit visibility' button is shown """ add_component(self.lib_page, "html", "Text") self.assertFalse(self.lib_page.xblocks[0].has_edit_visibility_button) def test_add_edit_xblock(self): """ Scenario: Ensure that we can add an XBlock, edit it, then see the resulting changes. 
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        Then there are no XBlocks displayed
        When I add Multiple Choice XBlock
        Then one XBlock is displayed
        When I edit the first XBlock
        And I go to the basic tab
        And set its text to a fairly trivial question about Battlestar Galactica
        And save the XBlock
        Then one XBlock is displayed
        And the first XBlock's student content contains at least part of the text I set
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)

        # Create a new problem block:
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]

        # Edit it:
        problem_block.edit()
        problem_block.open_basic_tab()
        problem_block.set_codemirror_text(
            """
            >>Who is "Starbuck"?<<
             (x) Kara Thrace
             ( ) William Adama
             ( ) Laura Roslin
             ( ) Lee Adama
             ( ) Gaius Baltar
            """
        )
        problem_block.save_settings()

        # Check that the save worked:
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]
        self.assertIn("Laura Roslin", problem_block.student_content)

    def test_no_discussion_button(self):
        """
        Ensure the UI is not loaded for adding discussions.
        """
        self.assertFalse(self.browser.find_elements_by_css_selector('span.large-discussion-icon'))

    @flaky  # TODO fix this, see TNL-2322
    def test_library_pagination(self):
        """
        Scenario: Ensure that adding several XBlocks to a library results in pagination.
        Given that I have a library in Studio with no XBlocks
        And I create 10 Multiple Choice XBlocks
        Then 10 are displayed.
        When I add one more Multiple Choice XBlock
        Then 1 XBlock will be displayed
        When I delete that XBlock
        Then 10 are displayed.
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)
        for _ in range(10):
            add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 10)
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
        self.assertEqual(len(self.lib_page.xblocks), 10)

    @data('top', 'bottom')
    def test_nav_present_but_disabled(self, position):
        """
        Scenario: Ensure that the navigation buttons aren't active when there aren't enough XBlocks.
        Given that I have a library in Studio with no XBlocks
        The Navigation buttons should be disabled.
        When I add a multiple choice problem
        The Navigation buttons should be disabled.
""" self.assertEqual(len(self.lib_page.xblocks), 0) self.assertTrue(self.lib_page.nav_disabled(position)) add_component(self.lib_page, "problem", "Multiple Choice") self.assertTrue(self.lib_page.nav_disabled(position)) def test_delete_deletes_only_desired_block(self): """ Scenario: Ensure that when deleting XBlock only desired XBlock is deleted Given that I have a library in Studio with no XBlocks And I create Blank Common Problem XBlock And I create Checkboxes XBlock When I delete Blank Problem XBlock Then Checkboxes XBlock is not deleted And Blank Common Problem XBlock is deleted """ self.assertEqual(len(self.lib_page.xblocks), 0) add_component(self.lib_page, "problem", "Blank Common Problem") add_component(self.lib_page, "problem", "Checkboxes") self.assertEqual(len(self.lib_page.xblocks), 2) self.assertIn("Blank Common Problem", self.lib_page.xblocks[0].name) self.assertIn("Checkboxes", self.lib_page.xblocks[1].name) self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator) self.assertEqual(len(self.lib_page.xblocks), 1) problem_block = self.lib_page.xblocks[0] self.assertIn("Checkboxes", problem_block.name) @attr('shard_5') @ddt class LibraryNavigationTest(StudioLibraryTest): """ Test common Navigation actions """ def setUp(self): # pylint: disable=arguments-differ """ Ensure a library exists and navigate to the library edit page. """ super(LibraryNavigationTest, self).setUp() self.lib_page = LibraryEditPage(self.browser, self.library_key) self.lib_page.visit() self.lib_page.wait_until_ready() def populate_library_fixture(self, library_fixture): """ Create four pages worth of XBlocks, and offset by one so each is named after the number they should be in line by the user's perception. """ # pylint: disable=attribute-defined-outside-init self.blocks = [XBlockFixtureDesc('html', str(i)) for i in xrange(1, 41)] library_fixture.add_children(*self.blocks) def test_arbitrary_page_selection(self): """ Scenario: I can pick a specific page number of a Library at will. 
        Given that I have a library in Studio with 40 XBlocks
        When I go to the 3rd page
        The first XBlock should be the 21st XBlock
        When I go to the 4th Page
        The first XBlock should be the 31st XBlock
        When I go to the 1st page
        The first XBlock should be the 1st XBlock
        When I go to the 2nd page
        The first XBlock should be the 11th XBlock
        """
        self.lib_page.go_to_page(3)
        self.assertEqual(self.lib_page.xblocks[0].name, '21')
        self.lib_page.go_to_page(4)
        self.assertEqual(self.lib_page.xblocks[0].name, '31')
        self.lib_page.go_to_page(1)
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        self.lib_page.go_to_page(2)
        self.assertEqual(self.lib_page.xblocks[0].name, '11')

    def test_bogus_page_selection(self):
        """
        Scenario: I can't pick a nonsense page number of a Library
        Given that I have a library in Studio with 40 XBlocks
        When I attempt to go to the 'a'th page
        The input field will be cleared and no change of XBlocks will be made
        When I attempt to visit the 5th page
        The input field will be cleared and no change of XBlocks will be made
        When I attempt to visit the -1st page
        The input field will be cleared and no change of XBlocks will be made
        When I attempt to visit the 0th page
        The input field will be cleared and no change of XBlocks will be made
        """
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        self.lib_page.go_to_page('a')
        self.assertTrue(self.lib_page.check_page_unchanged('1'))
        self.lib_page.go_to_page(-1)
        self.assertTrue(self.lib_page.check_page_unchanged('1'))
        self.lib_page.go_to_page(5)
        self.assertTrue(self.lib_page.check_page_unchanged('1'))
        self.lib_page.go_to_page(0)
        self.assertTrue(self.lib_page.check_page_unchanged('1'))

    @data('top', 'bottom')
    def test_nav_buttons(self, position):
        """
        Scenario: Ensure that the navigation buttons work.
        Given that I have a library in Studio with 40 XBlocks
        The previous button should be disabled.
        The first XBlock should be the 1st XBlock
        Then if I hit the next button
        The first XBlock should be the 11th XBlock
        Then if I hit the next button
        The first XBlock should be the 21st XBlock
        Then if I hit the next button
        The first XBlock should be the 31st XBlock
        And the next button should be disabled
        Then if I hit the previous button
        The first XBlock should be the 21st XBlock
        Then if I hit the previous button
        The first XBlock should be the 11th XBlock
        Then if I hit the previous button
        The first XBlock should be the 1st XBlock
        And the previous button should be disabled
        """
        # Check forward navigation
        self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        self.lib_page.move_forward(position)
        self.assertEqual(self.lib_page.xblocks[0].name, '11')
        self.lib_page.move_forward(position)
        self.assertEqual(self.lib_page.xblocks[0].name, '21')
        self.lib_page.move_forward(position)
        self.assertEqual(self.lib_page.xblocks[0].name, '31')
        self.lib_page.nav_disabled(position, ['next'])

        # Check backward navigation
        self.lib_page.move_back(position)
        self.assertEqual(self.lib_page.xblocks[0].name, '21')
        self.lib_page.move_back(position)
        self.assertEqual(self.lib_page.xblocks[0].name, '11')
        self.lib_page.move_back(position)
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        self.assertTrue(self.lib_page.nav_disabled(position, ['previous']))

    def test_library_pagination(self):
        """
        Scenario: Ensure that adding several XBlocks to a library results in pagination.
        Given that I have a library in Studio with 40 XBlocks
        Then 10 are displayed
        And the first XBlock will be the 1st one
        And I'm on the 1st page
        When I add 1 Multiple Choice XBlock
        Then 1 XBlock will be displayed
        And I'm on the 5th page
        The first XBlock will be the newest one
        When I delete that XBlock
        Then 10 are displayed
        And I'm on the 4th page
        And the first XBlock is the 31st one
        And the last XBlock is the 40th one.
        """
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.get_page_number(), '1')
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        self.assertEqual(self.lib_page.get_page_number(), '5')
        self.assertEqual(self.lib_page.xblocks[0].name, "Multiple Choice")
        self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.get_page_number(), '4')
        self.assertEqual(self.lib_page.xblocks[0].name, '31')
        self.assertEqual(self.lib_page.xblocks[-1].name, '40')

    def test_delete_shifts_blocks(self):
        """
        Scenario: Ensure that removing an XBlock shifts other blocks back.
        Given that I have a library in Studio with 40 XBlocks
        Then 10 are displayed
        And I will be on the first page
        When I delete the third XBlock
        There will be 10 displayed
        And the first XBlock will be the first one
        And the last XBlock will be the 11th one
        And I will be on the first page
        """
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.get_page_number(), '1')
        self.lib_page.click_delete_button(self.lib_page.xblocks[2].locator, confirm=True)
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        self.assertEqual(self.lib_page.xblocks[-1].name, '11')
        self.assertEqual(self.lib_page.get_page_number(), '1')

    def test_previews(self):
        """
        Scenario: Ensure the user is able to hide previews of XBlocks.
        Given that I have a library in Studio with 40 XBlocks
        Then previews are visible
        And when I click the toggle previews button
        Then the previews will not be visible
        And when I click the toggle previews button
        Then the previews are visible
        """
        self.assertTrue(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        self.assertFalse(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        self.assertTrue(self.lib_page.are_previews_showing())

    def test_previews_navigation(self):
        """
        Scenario: Ensure preview settings persist across navigation.
        Given that I have a library in Studio with 40 XBlocks
        Then previews are visible
        And when I click the toggle previews button
        And click the next page button
        Then the previews will not be visible
        And the first XBlock will be the 11th one
        And the last XBlock will be the 20th one
        And when I click the toggle previews button
        And I click the previous page button
        Then the previews will be visible
        And the first XBlock will be the first one
        And the last XBlock will be the 11th one
        """
        self.assertTrue(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        # Which set of arrows shouldn't matter for this test.
self.lib_page.move_forward('top') self.assertFalse(self.lib_page.are_previews_showing()) self.assertEqual(self.lib_page.xblocks[0].name, '11') self.assertEqual(self.lib_page.xblocks[-1].name, '20') self.lib_page.toggle_previews() self.lib_page.move_back('top') self.assertTrue(self.lib_page.are_previews_showing()) self.assertEqual(self.lib_page.xblocks[0].name, '1') self.assertEqual(self.lib_page.xblocks[-1].name, '10') def test_preview_state_persistance(self): """ Scenario: Ensure preview state persists between page loads. Given that I have a library in Studio with 40 XBlocks Then previews are visible And when I click the toggle previews button And I revisit the page Then the previews will not be visible """ self.assertTrue(self.lib_page.are_previews_showing()) self.lib_page.toggle_previews() self.lib_page.visit() self.lib_page.wait_until_ready() self.assertFalse(self.lib_page.are_previews_showing()) def test_preview_add_xblock(self): """ Scenario: Ensure previews are shown when adding new blocks, regardless of preview setting. Given that I have a library in Studio with 40 XBlocks Then previews are visible And when I click the toggle previews button Then the previews will not be visible And when I add an XBlock Then I will be on the 5th page And the XBlock will have loaded a preview And when I revisit the library And I go to the 5th page Then the top XBlock will be the one I added And it will not have a preview And when I add an XBlock Then the XBlock I added will have a preview And the top XBlock will not have one. """ self.assertTrue(self.lib_page.are_previews_showing()) self.lib_page.toggle_previews() self.assertFalse(self.lib_page.are_previews_showing()) add_component(self.lib_page, "problem", "Checkboxes") self.assertEqual(self.lib_page.get_page_number(), '5') first_added = self.lib_page.xblocks[0] self.assertIn("Checkboxes", first_added.name) self.assertFalse(self.lib_page.xblocks[0].is_placeholder()) self.lib_page.visit() self.lib_page.wait_until_ready() self.lib_page.go_to_page(5) self.assertTrue(self.lib_page.xblocks[0].is_placeholder()) add_component(self.lib_page, "problem", "Multiple Choice") # DOM has detatched the element since last assignment first_added = self.lib_page.xblocks[0] second_added = self.lib_page.xblocks[1] self.assertIn("Multiple Choice", second_added.name) self.assertFalse(second_added.is_placeholder()) self.assertTrue(first_added.is_placeholder()) def test_edit_with_preview(self): """ Scenario: Editing an XBlock should show me a preview even if previews are hidden. Given that I have a library in Studio with 40 XBlocks Then previews are visible And when I click the toggle previews button Then the previews will not be visible And when I edit the first XBlock Then the first XBlock will show a preview And the other XBlocks will still be placeholders """ self.assertTrue(self.lib_page.are_previews_showing()) self.lib_page.toggle_previews() self.assertFalse(self.lib_page.are_previews_showing()) target = self.lib_page.xblocks[0] target.edit() target.save_settings() self.assertFalse(target.is_placeholder()) self.assertTrue(all([xblock.is_placeholder() for xblock in self.lib_page.xblocks[1:]])) def test_duplicate_xblock_pagination(self): """ Scenario: Duplicating an XBlock should not shift the page if the XBlock is not at the end. 
Given that I have a library in Studio with 40 XBlocks When I duplicate the third XBlock Then the page should not change And the duplicate XBlock should be there And it should show a preview And there should not be more than 10 XBlocks visible. """ third_block_id = self.lib_page.xblocks[2].locator self.lib_page.click_duplicate_button(third_block_id) self.lib_page.wait_until_ready() target = self.lib_page.xblocks[3] self.assertIn('Duplicate', target.name) self.assertFalse(target.is_placeholder()) self.assertEqual(len(self.lib_page.xblocks), 10) def test_duplicate_xblock_pagination_end(self): """ Scenario: Duplicating an XBlock if it's the last one should bring me to the next page with a preview. Given that I have a library in Studio with 40 XBlocks And when I hide previews And I duplicate the last XBlock The page should change to page 2 And the duplicate XBlock should be the first XBlock And it should not be a placeholder """ self.lib_page.toggle_previews() last_block_id = self.lib_page.xblocks[-1].locator self.lib_page.click_duplicate_button(last_block_id) self.lib_page.wait_until_ready() self.assertEqual(self.lib_page.get_page_number(), '2') target_block = self.lib_page.xblocks[0] self.assertIn('Duplicate', target_block.name) self.assertFalse(target_block.is_placeholder()) class LibraryUsersPageTest(StudioLibraryTest): """ Test the functionality of the library "Instructor Access" page. """ def setUp(self): super(LibraryUsersPageTest, self).setUp() # Create a second user for use in these tests: AutoAuthPage(self.browser, username="second", email="second@example.com", no_login=True).visit() self.page = LibraryUsersPage(self.browser, self.library_key) self.page.visit() def _refresh_page(self): """ Reload the page. """ self.page = LibraryUsersPage(self.browser, self.library_key) self.page.visit() self.page.wait_until_no_loading_indicator() @flaky # TODO fix this; see TNL-2647 def test_user_management(self): """ Scenario: Ensure that we can edit the permissions of users. Given I have a library in Studio where I am the only admin assigned (which is the default for a newly-created library) And I navigate to Library "Instructor Access" Page in Studio Then there should be one user listed (myself), and I must not be able to remove myself or my instructor privilege. When I click Add Instructor Then I see a form to complete When I complete the form and submit it Then I can see the new user is listed as a "User" of the library When I click to Add Staff permissions to the new user Then I can see the new user has staff permissions and that I am now able to promote them to an Admin or remove their staff permissions. When I click to Add Admin permissions to the new user Then I can see the new user has admin permissions and that I can now remove Admin permissions from either user. """ def check_is_only_admin(user): """ Ensure user is an admin user and cannot be removed. (There must always be at least one admin user.) 
""" self.assertIn("admin", user.role_label.lower()) self.assertFalse(user.can_promote) self.assertFalse(user.can_demote) self.assertFalse(user.can_delete) self.assertTrue(user.has_no_change_warning) self.assertIn("Promote another member to Admin to remove your admin rights", user.no_change_warning_text) self.assertEqual(len(self.page.users), 1) user = self.page.users[0] self.assertTrue(user.is_current_user) check_is_only_admin(user) # Add a new user: self.assertTrue(self.page.has_add_button) self.assertFalse(self.page.new_user_form_visible) self.page.click_add_button() self.assertTrue(self.page.new_user_form_visible) self.page.set_new_user_email('second@example.com') self.page.click_submit_new_user_form() # Check the new user's listing: def get_two_users(): """ Expect two users to be listed, one being me, and another user. Returns me, them """ users = self.page.users self.assertEqual(len(users), 2) self.assertEqual(len([u for u in users if u.is_current_user]), 1) if users[0].is_current_user: return users[0], users[1] else: return users[1], users[0] self._refresh_page() user_me, them = get_two_users() check_is_only_admin(user_me) self.assertIn("user", them.role_label.lower()) self.assertTrue(them.can_promote) self.assertIn("Add Staff Access", them.promote_button_text) self.assertFalse(them.can_demote) self.assertTrue(them.can_delete) self.assertFalse(them.has_no_change_warning) # Add Staff permissions to the new user: them.click_promote() self._refresh_page() user_me, them = get_two_users() check_is_only_admin(user_me) self.assertIn("staff", them.role_label.lower()) self.assertTrue(them.can_promote) self.assertIn("Add Admin Access", them.promote_button_text) self.assertTrue(them.can_demote) self.assertIn("Remove Staff Access", them.demote_button_text) self.assertTrue(them.can_delete) self.assertFalse(them.has_no_change_warning) # Add Admin permissions to the new user: them.click_promote() self._refresh_page() user_me, them = get_two_users() self.assertIn("admin", user_me.role_label.lower()) self.assertFalse(user_me.can_promote) self.assertTrue(user_me.can_demote) self.assertTrue(user_me.can_delete) self.assertFalse(user_me.has_no_change_warning) self.assertIn("admin", them.role_label.lower()) self.assertFalse(them.can_promote) self.assertTrue(them.can_demote) self.assertIn("Remove Admin Access", them.demote_button_text) self.assertTrue(them.can_delete) self.assertFalse(them.has_no_change_warning) # Delete the new user: them.click_delete() self._refresh_page() self.assertEqual(len(self.page.users), 1) user = self.page.users[0] self.assertTrue(user.is_current_user)
tiagochiavericosta/edx-platform
common/test/acceptance/tests/studio/test_studio_library.py
Python
agpl-3.0
27,414
[ "VisIt" ]
2dedc7f9b8eba057c7041569deade3cb78dcfbf1c80ece79b240e4b8330341e7
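The test_nav_buttons case in the Studio library acceptance tests above is parameterized over the top and bottom pager bars with @data('top', 'bottom'), which comes from the ddt package. A minimal sketch of that parameterization pattern, assuming the standard ddt API; the class and method names below are illustrative and not taken from the test file:

# Minimal sketch of ddt-style test parameterization (illustrative names,
# assumes the standard ddt package API).
import unittest

from ddt import ddt, data


@ddt
class NavButtonsExample(unittest.TestCase):
    @data('top', 'bottom')
    def test_position(self, position):
        # ddt expands the decorated method into one test per value and
        # passes the value as an argument, which is how `position`
        # reaches the body of test_nav_buttons above.
        self.assertIn(position, ('top', 'bottom'))


if __name__ == '__main__':
    unittest.main()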
# -*- coding: utf-8 -*- """Van der Waals density functional. This module implements the Dion-Rydberg-Schröder-Langreth-Lundqvist XC-functional. There are two implementations: 1. A simlpe real-space double sum. 2. A more efficient FFT implementation based on the Román-Péres-Soler paper. """ import os import sys import pickle from math import sin, cos, exp, pi, log, sqrt, ceil import numpy as np from numpy.fft import fft, rfftn, irfftn from gpaw.utilities.timing import nulltimer from gpaw.xc.libxc import LibXC from gpaw.xc.gga import GGA from gpaw.grid_descriptor import GridDescriptor from gpaw.utilities.tools import construct_reciprocal from gpaw.fd_operators import Gradient from gpaw import setup_paths, extra_parameters import gpaw.mpi as mpi import _gpaw def T(w, x, y, z): return 0.5 * ((1.0 / (w + x) + 1.0 / (y + z)) * (1.0 / ((w + y) * (x + z)) + 1.0 / ((w + z) * (y + x)))) def W(a, b): return 2 * ((3 - a**2) * b * cos(b) * sin(a) + (3 - b**2) * a * cos(a) * sin(b) + (a**2 + b**2 - 3) * sin(a) * sin(b) - 3 * a * b * cos(a) * cos(b)) / (a * b)**3 eta = 8 * pi / 9 def nu(y, d): return 0.5 * y**2 / (1 - exp(-0.5 * eta * (y / d)**2)) def f(a, b, d, dp): va = nu(a, d) vb = nu(b, d) vpa = nu(a, dp) vpb = nu(b, dp) return 2 * (a * b)**2 * W(a, b) * T(va, vb, vpa, vpb) / pi**2 def phi(d, dp): """vdW-DF kernel.""" from scipy.integrate import quad kwargs = dict(epsabs=1.0e-6, epsrel=1.0e-6, limit=400) cut = 35 return quad(lambda y: quad(f, 0, cut, (y, d, dp), **kwargs)[0], 0, cut, **kwargs)[0] C = 12 * (4 * pi / 9)**3 def phi_asymptotic(d, dp): """Asymptotic behavior of vdW-DF kernel.""" d2 = d**2 dp2 = dp**2 return -C / (d2 * dp2 * (d2 + dp2)) def hRPS(x, xc=1.0): """Cutoff function from Román-Péres-Soler paper.""" x1 = x / xc xm = x1 * 1.0 y = -x1 z = 1.0 + x1 for m in range(2, 13): xm *= x1 y -= xm / m if m < 12: z += xm y = np.exp(y) return xc * (1.0 - y), z * y class VDWFunctional(GGA): """Base class for vdW-DF.""" def __init__(self, name, world=None, q0cut=5.0, phi0=0.5, ds=1.0, Dmax=20.0, nD=201, ndelta=21, soft_correction=False, kernel=None, Zab=None, vdwcoef=1.0, verbose=False, energy_only=False): """vdW-DF. parameters: name: str Name of functional. world: MPI communicator Communicator to parallelize over. Defaults to gpaw.mpi.world. q0cut: float Maximum value for q0. phi0: float Smooth value for phi(0,0). ds: float Cutoff for smooth kernel. Dmax: float Maximum value for D. nD: int Number of values for D in kernel-table. ndelta: int Number of values for delta in kernel-table. soft_correction: bool Correct for soft kernel. kernel: Which kernel to use. Zab: parameter in nonlocal kernel. vdwcoef: float Scaling of vdW energy. verbose: bool Print useful information. 
""" if world is None: self.world = mpi.world else: self.world = world self.q0cut = q0cut self.phi0 = phi0 self.ds = ds self.delta_i = np.linspace(0, 1.0, ndelta) self.D_j = np.linspace(0, Dmax, nD) self.verbose = verbose self.read_table() self.soft_correction = soft_correction if soft_correction: dD = self.D_j[1] self.C_soft = np.dot(self.D_j**2, self.phi_ij[0]) * 4 * pi * dD self.gd = None self.energy_only = energy_only self.timer = nulltimer if name == 'vdW-DF': assert kernel is None and Zab is None kernel = LibXC('GGA_X_PBE_R+LDA_C_PW') Zab = -0.8491 elif name == 'vdW-DF2': assert kernel is None and Zab is None kernel = LibXC('GGA_X_PW86+LDA_C_PW') Zab = -1.887 self.Zab = Zab GGA.__init__(self, kernel) self.vdwcoef = vdwcoef self.name = name self.LDAc = LibXC('LDA_C_PW') def get_setup_name(self): return 'revPBE' def initialize(self, density, hamiltonian, wfs, occupations): self.timer = wfs.timer def calculate_gga(self, e_g, n_sg, dedn_sg, sigma_xg, dedsigma_xg): GGA.calculate_gga(self, e_g, n_sg, dedn_sg, sigma_xg, dedsigma_xg) eLDAc_g = self.gd.empty() vLDAc_sg = self.gd.zeros(1) if self.vdwcoef == 0.0: return if len(n_sg) == 1: self.LDAc.calculate(eLDAc_g, n_sg, vLDAc_sg) e = self.get_non_local_energy(n_sg[0], sigma_xg[0], eLDAc_g, vLDAc_sg[0], dedn_sg[0], dedsigma_xg[0]) else: n_sg = n_sg.sum(0) n_sg.shape = (1,) + n_sg.shape self.LDAc.calculate(eLDAc_g, n_sg, vLDAc_sg) v_g = np.zeros_like(e_g) deda2nl_g = np.zeros_like(e_g) a2_g = sigma_xg[0] + 2 * sigma_xg[1] + sigma_xg[2] e = self.get_non_local_energy(n_sg[0], a2_g, eLDAc_g, vLDAc_sg[0], v_g, deda2nl_g) dedsigma_xg[0] += self.vdwcoef * deda2nl_g dedsigma_xg[1] += self.vdwcoef * 2 * deda2nl_g dedsigma_xg[2] += self.vdwcoef * deda2nl_g dedn_sg += self.vdwcoef * v_g if self.gd.comm.rank == 0: e_g[0, 0, 0] += self.vdwcoef * e / self.gd.dv def get_non_local_energy(self, n_g=None, a2_g=None, e_LDAc_g=None, v_LDAc_g=None, v_g=None, deda2_g=None): """Calculate non-local correlation energy. parameters: n_g: ndarray Density. a2_g: ndarray Absolute value of the gradient of the density - squared. e_LDAc_g: ndarray LDA correlation energy density. """ gd = self.gd n_g = n_g.clip(1e-7, np.inf) # Calculate q0 and cut it off smoothly at q0cut: kF_g = (3 * pi**2 * n_g)**(1.0 / 3.0) q0_g, dhdx_g = hRPS(kF_g - 4 * pi / 3 * e_LDAc_g / n_g - self.Zab / 36 / kF_g * a2_g / n_g**2, self.q0cut) if self.verbose: print ('VDW: q0 (min, mean, max): (%f, %f, %f)' % (q0_g.min(), q0_g.mean(), q0_g.max())) if self.soft_correction: dEcnl = gd.integrate(n_g**2 / q0_g**3) * 0.5 * self.C_soft else: dEcnl = 0.0 # Distribute density and q0 to all processors: n_g = gd.collect(n_g, broadcast=True) q0_g = gd.collect(q0_g, broadcast=True) if not self.energy_only: self.dhdx_g = gd.collect(dhdx_g, broadcast=True) Ecnl = self.calculate_6d_integral(n_g, q0_g, a2_g, e_LDAc_g, v_LDAc_g, v_g, deda2_g) return Ecnl + dEcnl def read_table(self): name = ('phi-%.3f-%.3f-%.3f-%d-%d.pckl' % (self.phi0, self.ds, self.D_j[-1], len(self.delta_i), len(self.D_j))) if 'GPAW_VDW' in os.environ: print 'Use of GPAW_VDW is deprecated.' print 'Put', name, 'in your GPAW_SETUP_PATH directory.' dirs = [os.environ['GPAW_VDW']] else: dirs = setup_paths + ['.'] for dir in dirs: filename = os.path.join(dir, name) if os.path.isfile(filename): self.phi_ij = pickle.load(open(filename)) if self.verbose: print 'VDW: using', filename return print 'VDW: Could not find table file:', name self.make_table(name) def make_table(self, name): print 'VDW: Generating vdW-DF kernel ...' 
print 'VDW:', ndelta = len(self.delta_i) nD = len(self.D_j) self.phi_ij = np.zeros((ndelta, nD)) for i in range(self.world.rank, ndelta, self.world.size): print ndelta - i, sys.stdout.flush() delta = self.delta_i[i] for j in range(nD - 1, -1, -1): D = self.D_j[j] d = D * (1.0 + delta) dp = D * (1.0 - delta) if d**2 + dp**2 > self.ds**2: self.phi_ij[i, j] = phi(d, dp) else: P = np.polyfit([0, self.D_j[j + 1]**2, self.D_j[j + 2]**2], [self.phi0, self.phi_ij[i, j + 1], self.phi_ij[i, j + 2]], 2) self.phi_ij[i, :j + 3] = np.polyval(P, self.D_j[:j + 3]**2) break self.world.sum(self.phi_ij) print print 'VDW: Done!' if self.world.rank == 0: pickle.dump(self.phi_ij, open(name, 'w'), pickle.HIGHEST_PROTOCOL) def make_prl_plot(self, multiply_by_4_pi_D_squared=True): import pylab as plt x = np.linspace(0, 8.0, 100) for delta in [0, 0.5, 0.9]: y = [self.phi(D * (1.0 + delta), D * (1.0 - delta)) for D in x] if multiply_by_4_pi_D_squared: y *= 4 * pi * x**2 plt.plot(x, y, label=r'$\delta=%.1f$' % delta) plt.legend(loc='best') plt.plot(x, np.zeros(len(x)), 'k-') plt.xlabel('D') plt.ylabel(r'$4\pi D^2 \phi(\rm{Hartree})$') plt.show() def phi(self, d, dp): """Kernel function. Uses bi-linear interpolation and returns zero for D > Dmax. """ P = self.phi_ij D = (d + dp) / 2.0 if D < 1e-14: return P[0, 0] if D >= self.D_j[-1]: return 0.0 delta = abs((d - dp) / (2 * D)) ddelta = self.delta_i[1] x = delta / ddelta i = int(x) if i == len(self.delta_i) - 1: i -= 1 x = 1.0 else: x -= i dD = self.D_j[1] y = D / dD j = int(y) y -= j return (x * (y * P[i + 1, j + 1] + (1 - y) * P[i + 1, j]) + (1 - x) * (y * P[i, j + 1] + (1 - y) * P[i, j])) class RealSpaceVDWFunctional(VDWFunctional): """Real-space implementation of vdW-DF.""" def __init__(self, name='vdW-DF', repeat=None, ncut=0.0005, **kwargs): """Real-space vdW-DF. parameters: repeat: 3-tuple Repeat the unit cell. ncut: float Density cutoff. 
""" VDWFunctional.__init__(self, name, **kwargs) self.repeat = repeat self.ncut = ncut def calculate_6d_integral(self, n_g, q0_g, a2_g=None, e_LDAc_g=None, v_LDAc_g=None, v_g=None, deda2_g=None): """Real-space double-sum.""" gd = self.gd if not gd.orthogonal: raise NotImplementedError('Real-space vdW calculations require ' + 'an orthogonal cell.') n_c = n_g.shape R_gc = np.empty(n_c + (3,)) h_c = gd.h_cv.diagonal() R_gc[..., 0] = (np.arange(0, n_c[0]) * h_c[0]).reshape((-1, 1, 1)) R_gc[..., 1] = (np.arange(0, n_c[1]) * h_c[1]).reshape((-1, 1)) R_gc[..., 2] = np.arange(0, n_c[2]) * h_c[2] mask_g = (n_g.ravel() > self.ncut) R_ic = R_gc.reshape((-1, 3)).compress(mask_g, axis=0) n_i = n_g.ravel().compress(mask_g) q0_i = q0_g.ravel().compress(mask_g) # Number of grid points: ni = len(n_i) if self.verbose: print 'VDW: number of points:', ni # Number of pairs per processor: world = self.world p = ni * (ni - 1) // 2 // world.size iA = 0 for r in range(world.size): iB = iA + int(0.5 - iA + sqrt((iA - 0.5)**2 + 2 * p)) if r == world.rank: break iA = iB assert iA <= iB if world.rank == world.size - 1: iB = ni if self.repeat is None: repeat_c = np.zeros(3, int) else: repeat_c = np.asarray(self.repeat, int) self.rhistogram = np.zeros(200) self.Dhistogram = np.zeros(200) dr = 0.05 dD = 0.05 E_vdwnl = _gpaw.vdw(n_i, q0_i, R_ic, gd.cell_cv.diagonal().copy(), gd.pbc_c, repeat_c, self.phi_ij, self.delta_i[1], self.D_j[1], iA, iB, self.rhistogram, dr, self.Dhistogram, dD) self.rhistogram *= gd.dv**2 / dr self.Dhistogram *= gd.dv**2 / dD self.world.sum(self.rhistogram) self.world.sum(self.Dhistogram) E_vdwnl = self.world.sum(E_vdwnl * gd.dv**2) return E_vdwnl class FFTVDWFunctional(VDWFunctional): """FFT implementation of vdW-DF.""" def __init__(self, name='vdW-DF', Nalpha=20, lambd=1.2, rcut=125.0, Nr=2048, size=None, **kwargs): """FFT vdW-DF. parameters: Nalpha: int Number of interpolating cubic splines. lambd: float Parameter for defining geometric series of interpolation points. rcut: float Cutoff for kernel function. Nr: int Number of real-space points for kernel function. size: 3-tuple Size of FFT-grid. """ VDWFunctional.__init__(self, name, **kwargs) self.Nalpha = Nalpha self.lambd = lambd self.rcut = rcut self.Nr = Nr self.size = size self.C_aip = None self.phi_aajp = None if Nalpha < self.world.size: rstride = self.world.size // Nalpha newranks = range(0, self.world.size, rstride)[:Nalpha] self.vdwcomm = self.world.new_communicator(newranks) # self.vdwcomm will be None for those ranks not in the communicator else: self.vdwcomm = self.world if self.vdwcomm is not None: self.alphas = [a for a in range(self.Nalpha) if (a * self.vdwcomm.size // self.Nalpha == self.vdwcomm.rank)] else: self.alphas = [] def construct_cubic_splines(self): """Construc interpolating splines for q0. 
The recipe is from http://en.wikipedia.org/wiki/Spline_(mathematics) """ n = self.Nalpha lambd = self.lambd q1 = self.q0cut * (lambd - 1) / (lambd**(n - 1) - 1) q = q1 * (lambd**np.arange(n) - 1) / (lambd - 1) if self.verbose: print ('VDW: using %d cubic splines: 0.00, %.2f, ..., %.2f, %.2f' % (n, q1, q[-2], q[-1])) y = np.eye(n) a = y h = q[1:] - q[:-1] alpha = 3 * ((a[2:] - a[1:-1]) / h[1:, np.newaxis] - (a[1:-1] - a[:-2]) / h[:-1, np.newaxis]) l = np.ones((n, n)) mu = np.zeros((n, n)) z = np.zeros((n, n)) for i in range(1, n - 1): l[i] = 2 * (q[i + 1] - q[i - 1]) - h[i - 1] * mu[i - 1] mu[i] = h[i] / l[i] z[i] = (alpha[i - 1] - h[i - 1] * z[i - 1]) / l[i] b = np.zeros((n, n)) c = np.zeros((n, n)) d = np.zeros((n, n)) for i in range(n - 2, -1, -1): c[i] = z[i] - mu[i] * c[i + 1] b[i] = (a[i + 1] - a[i]) / h[i] - h[i] * (c[i + 1] + 2 * c[i]) / 3 d[i] = (c[i + 1] - c[i]) / 3 / h[i] self.C_aip = np.zeros((n, n, 4)) self.C_aip[:, :-1, 0] = a[:-1].T self.C_aip[:, :-1, 1] = b[:-1].T self.C_aip[:, :-1, 2] = c[:-1].T self.C_aip[:, :-1, 3] = d[:-1].T self.C_aip[-1, -1, 0] = 1.0 self.q_a = q def p(self, alpha, q): """Interpolating spline.""" i = int(log(q / self.q_a[1] * (self.lambd - 1) + 1) / log(self.lambd)) a, b, c, d = self.C_aip[alpha, i] dq = q - self.q_a[i] return a + dq * (b + dq * (c + dq * d)) def construct_fourier_transformed_kernels(self): self.phi_aajp = phi_aajp = {} M = self.Nr rcut = self.rcut r_g = np.linspace(0, rcut, M, 0) k_j = np.arange(M // 2) * (2 * pi / rcut) if self.verbose: print ("VDW: cutoff for fft'ed kernel: %.3f Hartree" % (0.5 * k_j[-1]**2)) for a in range(self.Nalpha): qa = self.q_a[a] for b in range(a, self.Nalpha): qb = self.q_a[b] phi_g = [self.phi(qa * r, qb * r) for r in r_g] phi_j = (fft(r_g * phi_g * 1j).real[:M // 2] * (rcut / M * 4 * pi)) phi_j[0] = np.dot(r_g, r_g * phi_g) * (rcut / M * 4 * pi) phi_j[1:] /= k_j[1:] phi_aajp[a, b] = phi_aajp[b, a] = spline(k_j, phi_j) def set_grid_descriptor(self, gd): if (self.gd is not None and (self.gd.N_c == gd.N_c).all() and (self.gd.pbc_c == gd.pbc_c).all() and (self.gd.cell_cv == gd.cell_cv).all()): return VDWFunctional.set_grid_descriptor(self, gd) if self.size is None: self.shape = gd.N_c.copy() for c, n in enumerate(self.shape): if not gd.pbc_c[c]: self.shape[c] = int(2**ceil(log(n) / log(2))) else: self.shape = np.array(self.size) scale_c1 = (self.shape / (1.0 * gd.N_c))[:, np.newaxis] gdfft = GridDescriptor(self.shape, gd.cell_cv * scale_c1, True) k_k = construct_reciprocal(gdfft)[0][:, :, :self.shape[2] // 2 + 1]**0.5 k_k[0, 0, 0] = 0.0 self.dj_k = k_k / (2 * pi / self.rcut) self.j_k = self.dj_k.astype(int) self.dj_k -= self.j_k self.dj_k *= 2 * pi / self.rcut assert self.j_k.max() < self.Nr // 2, 'Use larger Nr.' 
if self.verbose: print 'VDW: density array size:', gd.get_size_of_global_array() print 'VDW: zero-padded array size:', self.shape print ('VDW: maximum kinetic energy: %.3f Hartree' % (0.5 * k_k.max()**2)) def calculate_6d_integral(self, n_g, q0_g, a2_g=None, e_LDAc_g=None, v_LDAc_g=None, v_g=None, deda2_g=None): self.timer.start('VdW-DF integral') self.timer.start('splines') if self.C_aip is None: self.construct_cubic_splines() self.construct_fourier_transformed_kernels() self.timer.stop('splines') gd = self.gd N = self.Nalpha world = self.world vdwcomm = self.vdwcomm if self.alphas: self.timer.start('hmm1') i_g = (np.log(q0_g / self.q_a[1] * (self.lambd - 1) + 1) / log(self.lambd)).astype(int) dq0_g = q0_g - self.q_a[i_g] self.timer.stop('hmm1') else: i_g = None dq0_g = None if self.verbose: print 'VDW: fft:', theta_ak = {} p_ag = {} for a in self.alphas: self.timer.start('hmm2') C_pg = self.C_aip[a, i_g].transpose((3, 0, 1, 2)) pa_g = (C_pg[0] + dq0_g * (C_pg[1] + dq0_g * (C_pg[2] + dq0_g * C_pg[3]))) self.timer.stop('hmm2') del C_pg self.timer.start('FFT') theta_ak[a] = rfftn(n_g * pa_g, self.shape).copy() if extra_parameters.get('vdw0'): theta_ak[a][0, 0, 0] = 0.0 self.timer.stop() if not self.energy_only: p_ag[a] = pa_g del pa_g if self.verbose: print a, sys.stdout.flush() if self.energy_only: del i_g del dq0_g if self.verbose: print print 'VDW: convolution:', F_ak = {} dj_k = self.dj_k energy = 0.0 for a in range(N): if vdwcomm is not None: vdw_ranka = a * vdwcomm.size // N F_k = np.zeros((self.shape[0], self.shape[1], self.shape[2] // 2 + 1), complex) self.timer.start('Convolution') for b in self.alphas: _gpaw.vdw2(self.phi_aajp[a, b], self.j_k, dj_k, theta_ak[b], F_k) self.timer.stop() if vdwcomm is not None: self.timer.start('gather') vdwcomm.sum(F_k, vdw_ranka) self.timer.stop('gather') if vdwcomm is not None and vdwcomm.rank == vdw_ranka: if not self.energy_only: F_ak[a] = F_k energy += np.vdot(theta_ak[a][:, :, 0], F_k[:, :, 0]).real energy += np.vdot(theta_ak[a][:, :, -1], F_k[:, :, -1]).real energy += 2 * np.vdot(theta_ak[a][:, :, 1:-1], F_k[:, :, 1:-1]).real if self.verbose: print a, sys.stdout.flush() del theta_ak if self.verbose: print if not self.energy_only: F_ag = {} for a in self.alphas: n1, n2, n3 = gd.get_size_of_global_array() self.timer.start('iFFT') F_ag[a] = irfftn(F_ak[a]).real[:n1, :n2, :n3].copy() self.timer.stop() del F_ak self.timer.start('potential') self.calculate_potential(n_g, a2_g, i_g, dq0_g, p_ag, F_ag, e_LDAc_g, v_LDAc_g, v_g, deda2_g) self.timer.stop() self.timer.stop() return 0.5 * world.sum(energy) * gd.dv / self.shape.prod() def calculate_potential(self, n_g, a2_g, i_g, dq0_g, p_ag, F_ag, e_LDAc_g, v_LDAc_g, v_g, deda2_g): N = self.Nalpha world = self.world self.timer.start('collect') a2_g = self.gd.collect(a2_g, broadcast=True) e_LDAc_g = self.gd.collect(e_LDAc_g, broadcast=True) v_LDAc_g = self.gd.collect(v_LDAc_g, broadcast=True) self.timer.stop('collect') if self.alphas: self.timer.start('p1') dq0dn_g = ((pi / 3 / n_g)**(2.0 / 3.0) + 4 * pi / 3 * (e_LDAc_g / n_g - v_LDAc_g) / n_g + 7 * self.Zab / 108 / (3 * pi**2)**(1.0 / 3.0) * a2_g * n_g**(-10.0 / 3.0)) dq0da2_g = -(self.Zab / 36 / (3 * pi**2)**(1.0 / 3.0) / n_g**(7.0 / 3.0)) self.timer.stop('p1') v0_g = np.zeros_like(n_g) deda20_g = np.zeros_like(n_g) for a in self.alphas: self.timer.start('p2') C_pg = self.C_aip[a, i_g].transpose((3, 0, 1, 2)) dpadq0_g = C_pg[1] + dq0_g * (2 * C_pg[2] + 3 * dq0_g * C_pg[3]) del C_pg dthetatmp_g = n_g * dpadq0_g * self.dhdx_g dthetaadn_g = p_ag[a] + 
dq0dn_g * dthetatmp_g v0_g += dthetaadn_g * F_ag[a] del dthetaadn_g dthetaada2_g = dq0da2_g * dthetatmp_g del dthetatmp_g deda20_g += dthetaada2_g * F_ag[a] del dthetaada2_g self.timer.stop('p2') self.timer.start('sum') world.sum(v0_g) world.sum(deda20_g) self.timer.stop('sum') slice = self.gd.get_slice() v_g += v0_g[slice] deda2_g += deda20_g[slice] def spline(x, y): n = len(y) result = np.zeros((n, 4)) a, b, c, d = result.T a[:] = y h = x[1:] - x[:-1] alpha = 3 * ((a[2:] - a[1:-1]) / h[1:] - (a[1:-1] - a[:-2]) / h[:-1]) l = np.ones(n) mu = np.zeros(n) z = np.zeros(n) for i in range(1, n - 1): l[i] = 2 * (x[i + 1] - x[i - 1]) - h[i - 1] * mu[i - 1] mu[i] = h[i] / l[i] z[i] = (alpha[i - 1] - h[i - 1] * z[i - 1]) / l[i] for i in range(n - 2, -1, -1): c[i] = z[i] - mu[i] * c[i + 1] b[i] = (a[i + 1] - a[i]) / h[i] - h[i] * (c[i + 1] + 2 * c[i]) / 3 d[i] = (c[i + 1] - c[i]) / 3 / h[i] return result
qsnake/gpaw
gpaw/xc/vdw.py
Python
gpl-3.0
25,531
[ "GPAW" ]
d16c5e4365800b157e87967e463de3aab76a24830d8dd918b9910e420e95800e
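Written out, the helper functions at the top of the vdW-DF module above (W, T, nu, f, phi and phi_asymptotic) evaluate the kernel integrand used to build the tabulated phi_ij; transcribed directly from that code they read:

W(a,b) = \frac{2}{(ab)^3}\Bigl[(3-a^2)\,b\cos b\,\sin a + (3-b^2)\,a\cos a\,\sin b + (a^2+b^2-3)\sin a\,\sin b - 3ab\cos a\,\cos b\Bigr]

T(w,x,y,z) = \frac{1}{2}\Bigl(\frac{1}{w+x}+\frac{1}{y+z}\Bigr)\Bigl(\frac{1}{(w+y)(x+z)}+\frac{1}{(w+z)(y+x)}\Bigr)

\nu(y,d) = \frac{y^2/2}{1-e^{-\eta(y/d)^2/2}}, \qquad \eta = \frac{8\pi}{9}

\phi(d,d') = \frac{2}{\pi^2}\int_0^\infty\!\!\int_0^\infty (ab)^2\,W(a,b)\, T\bigl(\nu(a,d),\nu(b,d),\nu(a,d'),\nu(b,d')\bigr)\,da\,db

\phi_{\mathrm{asymp}}(d,d') = -\frac{C}{d^2 d'^2 (d^2+d'^2)}, \qquad C = 12\Bigl(\frac{4\pi}{9}\Bigr)^3

with the double integral evaluated numerically in phi() up to the cutoff of 35 used there.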
__version__ = '0.8.0' import kivy from kivy.utils import platform from kivy.app import App from kivy.uix.floatlayout import FloatLayout from kivy.uix.button import Button from kivy.uix.popup import Popup from kivy.uix.label import Label from kivy.uix.image import Image from kivy.core.audio import SoundLoader from kivy.core.window import Window from kivy.clock import Clock from threading import Thread import hero_voices import random import time import os import logging logger = logging.getLogger('spam_application') fh = logging.FileHandler('data/guessthehero.log') fh.setLevel(logging.DEBUG) logger.addHandler(fh) try: from urllib.request import urlretrieve except: from urllib import urlretrieve hero_names_rads = ['Earthshaker', 'Sven', 'Tiny', 'Kunkka', 'Beastmaster', 'Dragon_Knight', 'Clockwerk', 'Omniknight', 'Huskar', 'Alchemist', 'Brewmaster', 'Treant_Protector', 'Io', 'Centaur_Warrunner', 'Timbersaw', 'Bristleback', 'Tusk', 'Elder_Titan', 'Legion_Commander', 'Earth_Spirit', 'Phoenix'] hero_names_rada = ['Anti-Mage', 'Drow_Ranger', 'Juggernaut', 'Mirana', 'Morphling', 'Phantom_Lancer', 'Vengeful_Spirit', 'Riki', 'Sniper', 'Templar_Assassin', 'Luna', 'Bounty_Hunter', 'Ursa', 'Gyrocopter', 'Lone_Druid', 'Naga_Siren', 'Troll_Warlord', 'Ember_Spirit'] hero_names_radi = ['Crystal_Maiden', 'Puck', 'Tinker', 'Windranger', 'Zeus', 'Storm_Spirit', 'Lina', 'Shadow_Shaman', 'Natures_Prophet', 'Enchantress', 'Jakiro', 'Chen', 'Silencer', 'Ogre_Magi', 'Rubick', 'Disruptor', 'Keeper_of_the_Light', 'Skywrath_Mage'] hero_names_dirs = ['Axe', 'Pudge', 'Sand_King', 'Slardar', 'Tidehunter', 'Wraith_King', 'Lifestealer', 'Night_Stalker', 'Doom', 'Spirit_Breaker', 'Lycan', 'Chaos_Knight', 'Undying', 'Magnus', 'Abaddon'] hero_names_dira = ['Bloodseeker', 'Shadow_Fiend', 'Razor', 'Venomancer', 'Faceless_Void', 'Phantom_Assassin', 'Viper', 'Clinkz', 'Broodmother', 'Weaver', 'Spectre', 'Meepo', 'Nyx_Assassin', 'Slark', 'Medusa', 'Terrorblade'] hero_names_diri = ['Bane', 'Lich', 'Lion', 'Witch_Doctor', 'Enigma', 'Necrophos', 'Warlock', 'Queen_of_Pain', 'Death_Prophet', 'Pugna', 'Dazzle', 'Leshrac', 'Dark_Seer', 'Batrider', 'Ancient_Apparition', 'Invoker', 'Outworld_Devourer', 'Shadow_Demon', 'Visage'] # All heroes separated by category all_heroes = [hero_names_rads, hero_names_rada, hero_names_radi, hero_names_dirs, hero_names_dira, hero_names_diri] deletables = [] base_points = 10 base_lose_points = 40 class MenuUI(FloatLayout): def __init__(self, **kwargs): super(MenuUI, self).__init__(**kwargs) print('_____going to ask_______') app.ask_google_play() if platform == 'android': app.use_google_play = app.config.getint('play', 'use_google_play') if app.use_google_play: app.activate_google_play() #gs_android.setup(self) else: pass #popup = GooglePlayPopup() #popup.open() #Clock.schedule_once(app.ask_google_play, .5) def start_game(self): self.ids.sg_button.opacity = 0 self.ids.sg_button.disabled = True self.main = MainUI(self) self.add_widget(self.main) def remove_instance(self): self.remove_widget(self.main) self.main = None self.__init__() kill_phrases = ['Ownage!', 'Double Tap!', 'Killing Spree!', 'Ultra Kill', 'Rampage!'] platform = platform() if platform == 'android': import gs_android achievementKillingSpree="CgkI--nlpJAYEAIQAg" achievementDominating="CgkI--nlpJAYEAIQAw" achievementMegaKill="CgkI--nlpJAYEAIQBA" achievementUnstoppable="CgkI--nlpJAYEAIQBQ" achievementWickedSick="CgkI--nlpJAYEAIQBg" achievementMonsterKill="CgkI--nlpJAYEAIQBw" achievementGodlike="CgkI--nlpJAYEAIQCA" 
achievementBeyondGodlike="CgkI--nlpJAYEAIQCQ" leaderboardScore="CgkI--nlpJAYEAIQAA" achievements = {'Killing_Spree':achievementKillingSpree, 'Dominating':achievementDominating, 'Mega_Kill':achievementMegaKill, 'Unstoppable':achievementUnstoppable, 'Wicked_Sick':achievementWickedSick, 'Monster_Kill':achievementMonsterKill, 'Godlike':achievementGodlike, 'Beyond_Godlike':achievementGodlike} class GooglePlayPopup(Popup): def __init__(self, **kwargs): super(GooglePlayPopup, self).__init__(**kwargs) #def activate_google_play(self): # self.main_ui.activate_google_play() class MainUI(FloatLayout): def __init__(self, menu, **kwargs): super(MainUI, self).__init__(**kwargs) self.previous_buttons = list() self.seconds = 10 self.score = 0 self.time = 0 self.wins = 0 self.lives = 3 self.sound = SoundLoader.load('data/sounds/match_ready_no_focus.wav') self.prepare_clock() self.next_selected, self.next_winner = self.choose_hero( random.choice(all_heroes)) self.download_next_sound() self.menu = menu # self.ids.label_lives.text=str(self.lives+1) self.show_lives() app.use_google_play = 0#1 def show_lives(self): self.ids.box_lives.clear_widgets() for i in range(self.lives): life = Image( source='data/images/heart_of_tarrasque.png', size_hint=(0.8, 0.8)) self.ids.box_lives.add_widget(life) def prepare_clock(self): self.time = 3 Clock.schedule_once(self.start, 3) Clock.schedule_interval(self.update_time, 0.1) self.ids.winlose_label.text = 'Preparing...' def start(self, *args): self.load_next(True) Clock.schedule_interval(self.load_next, self.seconds) Clock.unschedule(self.update_time) Clock.schedule_interval(self.update_time, 0.1) def update_time(self, *args): self.time = self.time - 0.1 if self.time >= 0: self.ids.label_time.text = str(self.time) self.ids.pbar.value = self.time def stop_time(self, *args): Clock.unschedule(self.update_time) Clock.unschedule(self.load_next) def download_next_sound(self): for hero_voice in hero_voices.voices: if self.next_winner in hero_voice['name']: link = random.choice(hero_voice['voices']) name = link.rsplit('/', 1)[1] if name != deletables[0:]: deletables.append(name) print(deletables) try: if len(deletables) >= 4: os.remove( os.path.join('data/sounds/voices/', deletables[0])) deletables[0] = None del(deletables[0]) urlretrieve( link, os.path.join('data/sounds/voices/', name)) except Exception as e: logger.error( '%s - Error downloading file, most likely without internet connection' % time.asctime()) def load_next(self, *args): for arg in args: if arg == True: pass else: # Downgrades score when not clicking anything self.downgrade_score() # This method does not actually destroy the instance unless the # lives are under 0 self.destroy_instance() self.ids.winlose_label.text = '' if self.previous_buttons: for button in self.previous_buttons: self.ids.options_layout.remove_widget(button) del button self.winner = self.next_winner self.selected = self.next_selected self.next_selected, self.next_winner = self.choose_hero( random.choice(all_heroes)) self.tr = Thread( target=self.download_next_sound, name='Download_Thread') self.tr.start() self.create_buttons() self.play_winner_sound() self.time = self.seconds self.ids.question_image.source = 'data/images/question_mark.png' def show_popup(self, title, text): popup = Popup( title=title, content=Label(text=text), size_hint=(0.6, 0.4)) popup.open() def update_score(self): self.ids.label_score.text = str(self.score) def destroy_instance(self): if self.lives < 0: self.show_popup('GG', 'GAME OVER') self.stop_time() self.tr.join() 
self.menu.remove_instance() def button_click(self, name): # Right: if self.winner == name: self.wins = self.wins + 1 # Should implement: the faster you click, more points (multiply # remaining time by standard points) self.ids.question_image.source = 'data/images/%s.png' % self.winner if self.time > 0: self.upgrade_score() else: self.score = self.score + base_points self.update_score() #self.show_popup('Yay!', 'You win!') for bt in self.previous_buttons: bt.disabled = True if self.sound: self.sound.play() self.stop_time() self.prepare_clock() # Wrong: else: # TODO: set error sound and implement a nicer error punishment # engine (something like the time goes down a couple of seconds # every time the wrong hero is selected self.ids.winlose_label.text = 'NO!' self.downgrade_score() self.wins = 0 self.update_score() self.destroy_instance() def upgrade_score(self): self.score = self.score + (base_points * self.time) self.update_score() def downgrade_score(self): self.score = self.score - (base_lose_points * (10 - self.time)) self.lives = self.lives - 1 self.show_lives() #self.ids.box_lives.clear_widgets() if self.score < 0: self.score = 0 self.update_score() def play_winner_sound(self): for hero_voice in hero_voices.voices: if self.winner in hero_voice['name']: try: #link = random.choice(hero_voice['voices']) #name = link.rsplit('/', 1)[1] name = deletables[len(deletables) - 2] if name: sound_path = os.path.join( 'data/sounds/voices/', name) sound = SoundLoader.load(sound_path) except Exception as e: self.show_popup('Error', e.message) logger.error(e.message) link = random.choice(hero_voice['voices']) sound = SoundLoader.load(link) if sound: try: sound.play() except Exception as e: logger.error(e.message) def create_buttons(self): #---------- Change buttons ---------------- self.previous_buttons = [] for name in self.selected: bt = Button(text=name.replace( '_', ' '), background_normal='data/images/button_off.png', background_down='data/images/button_on.png') bt.bind(on_press=lambda x, hero=name: self.button_click(hero)) self.previous_buttons.append(bt) self.ids['options_layout'].add_widget(bt) #------------------------------------------ def choose_hero(self, hero_names): # Select winner ------------ selected = random.sample(hero_names, 4) winner = random.choice(selected) return selected, winner class GuessTheHeroApp(App): use_kivy_settings = False def build_config(self, config): if platform == 'android': config.setdefaults('play', {'use_google_play': '0'}) def build(self): global app app = self self.icon = 'data/images/dota_icon.png' self.menu = MenuUI() return self.menu def gs_upload_score(self, score): if platform == 'android' and self.use_google_play: gs_android.leaderboard(leaderboardScore, score) def activate_google_play(self): self.config.set('play', 'use_google_play', '1') self.config.write() self.use_google_play = 1 gs_android.setup(self) def gs_show_leaderboard(self): if platform == 'android': if self.use_google_play: gs_android.show_leaderboard(leaderboardScore) else: self.ask_google_play() def ask_google_play(self, *args): popup = GooglePlayPopup() popup.open() def on_pause(self): if self.menu.main: self.menu.main.stop_time() return True def on_resume(self): if self.menu.main: self.menu.main.start() if __name__ == '__main__': GuessTheHeroApp().run()
ROAND/guessthehero
.buildozer/android/app/main.py
Python
gpl-3.0
13,356
[ "TINKER" ]
ced9ee0ac01edf4ceed1283dc97004215829206f9b0dbacfb1057bd6fe6039c0
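The MainUI countdown in the Kivy app above is driven entirely by kivy.clock.Clock: a one-shot schedule_once kicks the round off, and schedule_interval/unschedule handle the 0.1 s timer ticks. A stripped-down sketch of that pattern, assuming only the public Clock API and using illustrative names (it needs a running Kivy event loop to actually tick):

# Stripped-down sketch of the Clock-driven countdown pattern used above
# (illustrative names; assumes only the public kivy.clock API).
from kivy.clock import Clock


class Countdown(object):
    def __init__(self, seconds=10):
        self.time = seconds
        # One-shot callback after 3 s, then a repeating tick every 0.1 s.
        Clock.schedule_once(self.start, 3)

    def start(self, *args):
        Clock.schedule_interval(self.tick, 0.1)

    def tick(self, dt):
        # dt is the elapsed time Kivy passes to interval callbacks.
        self.time -= dt
        if self.time <= 0:
            self.stop()

    def stop(self):
        Clock.unschedule(self.tick)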
# -*- Mode: Python; coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##

import gtk
import webbrowser


def open_browser(uri, screen=None):
    try:
        gtk.show_uri(screen, uri, gtk.gdk.CURRENT_TIME)
    except Exception:
        webbrowser.open(uri, new=True)
andrebellafronte/stoq
stoqlib/gui/utils/openbrowser.py
Python
gpl-2.0
1,114
[ "VisIt" ]
d129a3d3218a2c93c70565f720629f07e37cb73b7f005c5436f2280b2bc47984
""" Tests for the WebAppTest class. """ import os from unittest import expectedFailure from bok_choy.web_app_test import WebAppTest from .pages import ImagePage class ScreenshotAssertTest(WebAppTest): """ Test the integration with needle and its screenshot assertion capability. """ def test_needle_screenshot_success(self): """ Test the integration with needle to capture and assert on a screenshot of an element. Note that the baseline_directory is computed in the __init__ method of NeedleTestCase, so we can monkeypatch it here in the testcase itself. """ self.baseline_directory = os.path.realpath(os.path.join(os.getcwd(), 'tests', 'baseline')) self.page = ImagePage(self.browser).visit() self.assertScreenshot('#green_check', 'correct-icon') @expectedFailure def test_needle_screenshot_failure(self): """ Test the integration with needle to capture and assert on a screenshot of an element. Note that the baseline_directory is computed in the __init__ method of NeedleTestCase, so we can monkeypatch it here in the testcase itself. """ self.baseline_directory = os.path.realpath(os.path.join(os.getcwd(), 'tests', 'baseline')) self.page = ImagePage(self.browser).visit() self.assertScreenshot('#green_check', 'incorrect-icon')
drptbl/bok-choy
tests/test_webapptest.py
Python
apache-2.0
1,392
[ "VisIt" ]
0da1e02a0246fa973e38a489f55295110f64bd822aca995978fdb22394f2fef4
#!/usr/bin/python
# coding=utf-8

import os
import os.path
import sqlite3
import sys
import unicodedata

ATOM_NS = '{http://www.w3.org/2005/Atom}'
G_NS = '{http://schemas.google.com/g/2005}'
GC_NS = '{http://schemas.google.com/contact/2008}'

cacheroot = os.path.expanduser('~/.cache/simplegab/')
database = cacheroot + 'adresses.db'
xmlcache = ''

if not os.path.exists(cacheroot):
    os.makedirs(cacheroot)


def _normalize(string):
    '''Dummy casefold with normalization. Could be better to support cases like ü->ue'''
    return ''.join(c for c in unicodedata.normalize('NFKD', unicode(string))
                   if not unicodedata.combining(c)).lower()


def _email(e, f):
    title = e.findtext(ATOM_NS + 'title')
    nickname = e.findtext(GC_NS + 'nickname', default='')
    email = f.get('address')
    kind = f.get('label') or f.get('rel').split('#')[-1]
    fulltext = _normalize('%s "%s" <%s> (%s)' % (title, nickname, email, kind))
    return (title, nickname, email, kind, fulltext,)


def updatedb(xmlfile):
    if xmlfile:
        xml = open(xmlfile, 'r').read()
    else:
        import httplib2
        import logging
        from oauth2client.file import Storage
        from oauth2client.client import OAuth2WebServerFlow
        logging.basicConfig(level=logging.ERROR)
        storage = Storage(cacheroot + 'credentials.json')
        credentials = storage.get()
        if not credentials:
            oaclient = OAuth2WebServerFlow(
                '198676491087-pbh0275v2d29mthsnftkgrmnt9h4ptjq.apps.googleusercontent.com',
                'lXRf0vfwAWnX9Y-OQEJZlnu3',
                'https://www.googleapis.com/auth/contacts.readonly',
                'urn:ietf:wg:oauth:2.0:oob',
                'contactssync')
            print('Visit the following URL in your browser to authorise:')
            print(oaclient.step1_get_authorize_url())
            auth_code = raw_input('Copy the authorization code from the browser: ')
            credentials = oaclient.step2_exchange(auth_code)
            storage.put(credentials)
        http = httplib2.Http()
        credentials.authorize(http)
        (headers, xml) = http.request('https://www.google.com/m8/feeds/contacts/default/full?max-results=2000&v=3.0', 'GET')
        if xmlcache:
            open(xmlcache, 'w').write(xml)
    cu.execute('CREATE TABLE IF NOT EXISTS addresses (title TEXT, nickname TEXT, email TEXT, kind TEXT, fulltext TEXT)')
    cu.execute('DELETE FROM addresses')
    import xml.etree.ElementTree as ET
    for e in ET.fromstring(xml).findall(ATOM_NS + 'entry'):
        # TODO skip groupless
        if not e.find(ATOM_NS + 'category').get('term').endswith('contact'):
            continue
        cu.executemany('INSERT INTO addresses VALUES (?,?,?,?,?)',
                       (_email(e, f) for f in e.findall(G_NS + 'email')))
    cx.commit()


def query(query):
    tokens = [_normalize(unicode(s, 'utf-8')) for s in query.split()]
    query = 'SELECT email, title, kind FROM addresses WHERE 1' + (' AND fulltext LIKE ?' * len(tokens))
    cu.execute(query, ['%%%s%%' % t for t in tokens])
    print('\n' + '\n'.join('\t'.join(r) for r in cu.fetchall())),


cx = sqlite3.connect(database)
cu = cx.cursor()

if len(sys.argv) >= 2 and sys.argv[1] == 'update':
    updatedb(sys.argv[2] if len(sys.argv) > 2 else None)
elif len(sys.argv) == 3 and sys.argv[1] == 'query':
    query(sys.argv[2])
else:
    print('Usage: simplegab.py update [file.xml]')
    print('       simplegab.py query tokens')
mh21/simplegab
simplegab.py
Python
mit
3,503
[ "VisIt" ]
10860311d17dd80a43d2c60f65dc761bfe0edb4ccff21c3bff7e2894adb58281
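The _normalize helper in the address-book script above strips accents by NFKD-decomposing the string and discarding combining marks before lowercasing, so ü becomes u rather than ue (as its own docstring notes). A quick illustration of that behaviour, written with Python 3 str since the script itself targets Python 2's unicode:

# Quick illustration of the accent-stripping normalization used above
# (Python 3 spelling; the record itself uses Python 2's `unicode`).
import unicodedata


def normalize(string):
    decomposed = unicodedata.normalize('NFKD', string)
    return ''.join(c for c in decomposed
                   if not unicodedata.combining(c)).lower()


assert normalize('Zoë Müller') == 'zoe muller'   # ü -> u, not ue
assert normalize('Ångström') == 'angstrom'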
#!/usr/bin/env python # -*- coding: latin-1 -*- # # Copyright 2018-2021 Blaise Frederick # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # import copy import getopt import os import platform import sys import time import warnings import numpy as np import scipy as sp from matplotlib.pyplot import figure, plot, show from scipy.signal import savgol_filter, welch from scipy.stats import kurtosis, skew from statsmodels.robust import mad import rapidtide.correlate as tide_corr import rapidtide.filter as tide_filt import rapidtide.fit as tide_fit import rapidtide.glmpass as tide_glmpass import rapidtide.helper_classes as tide_classes import rapidtide.io as tide_io import rapidtide.miscmath as tide_math import rapidtide.multiproc as tide_multiproc import rapidtide.resample as tide_resample import rapidtide.stats as tide_stats import rapidtide.util as tide_util warnings.simplefilter(action="ignore", category=FutureWarning) try: import mkl mklexists = True except ImportError: mklexists = False try: import rapidtide.dlfilter as tide_dlfilt dlfilterexists = True print("dlfilter exists") except ImportError: dlfilterexists = False print("dlfilter does not exist") def usage(): print( os.path.basename(sys.argv[0]), "- Hypersampling by Analytic Phase Projection - Yay!", ) print("") print("usage: ", os.path.basename(sys.argv[0]), " fmrifile slicetimefile outputroot") print("") print("required arguments:") print(" fmrifile: - NIFTI file containing BOLD fMRI data") print( " slicetimefile: - Text file containing the offset time in seconds of each slice relative" ) print( " to the start of the TR, one value per line, OR the BIDS sidecar JSON file" ) print(" for the fmrifile (contains the SliceTiming field") print(" outputroot: - Base name for all output files") print("") print("optional arguments:") print("") print("Processing steps:") print( " --cardcalconly - Stop after all cardiac regressor calculation steps (before phase projection)." ) print( " --dodlfilter - Refine cardiac waveform from the fMRI data using a deep learning filter." ) print( " NOTE: this will only work if you have a working Keras installation;" ) print(" if not, this option is ignored.") print( " OTHER NOTE: Some versions of tensorflow seem to have some weird conflict" ) print( " with MKL which I can't seem to be able to fix. If the dl filter bombs" ) print( " complaining about multiple openmp libraries, try rerunning with the" ) print( " secret and inadvisable '--usesuperdangerousworkaround' flag. Good luck!" 
) print( " --model=MODELNAME - Use model MODELNAME for dl filter (default is model_revised - from the revised NeuroImage paper.)" ) print( " --spatialglm - Generate voxelwise aliased synthetic cardiac regressors and filter" ) print(" them out") print( " --temporalglm - Generate synthetic cardiac signal maps for each timepoint and filter" ) print(" them out") print("") print("Performance:") print( " --mklthreads=NTHREADS - Use NTHREADS MKL threads to accelerate processing (defaults to 1 - more" ) print( " threads up to the number of cores can accelerate processing a lot, but" ) print( " can really kill you on clusters unless you're very careful. Use at your" ) print(" own risk.)") print("") print("Preprocessing:") print( " --numskip=SKIP - Skip SKIP tr's at the beginning of the fMRI file (default is 0)." ) print( " --motskip=SKIP - Skip SKIP tr's at the beginning of the motion regressor file (default is 0)." ) print( " --motionfile=MOTFILE[:COLSPEC] - Read 6 columns of motion regressors out of MOTFILE text file." ) print( " (with timepoints rows) and regress them, their derivatives, " ) print( " and delayed derivatives out of the data prior to analysis." ) print( " If COLSPEC is present, use the comma separated list of ranges to" ) print( " specify X, Y, Z, RotX, RotY, and RotZ, in that order. For" ) print( " example, :3-5,7,0,9 would use columns 3, 4, 5, 7, 0 and 9" ) print(" for X, Y, Z, RotX, RotY, RotZ, respectively") print( " --motionhp=HPFREQ - Highpass filter motion regressors to HPFREQ Hz prior to regression" ) print( " --motionlp=LPFREQ - Lowpass filter motion regressors to HPFREQ Hz prior to regression" ) print("") print("Cardiac estimation tuning:") print( " the generation of the cardiac waveform (default is no variance masking.)" ) print( " --estmask=MASKNAME - Generation of cardiac waveform from data will be restricted to" ) print( " voxels in MASKNAME and weighted by the mask intensity. If this is " ) print( " selected, happy will only make a single pass through the data (the" ) print( " initial vessel mask generation pass will be skipped)." ) print( " --minhr=MINHR - Limit lower cardiac frequency search range to MINHR BPM (default is 40)" ) print( " --maxhr=MAXHR - Limit upper cardiac frequency search range to MAXHR BPM (default is 140)" ) print( " --minhrfilt=MINHR - Highpass filter cardiac waveform estimate to MINHR BPM (default is 40)" ) print( " --maxhrfilt=MAXHR - Lowpass filter cardiac waveform estimate to MAXHR BPM (default is 1000)" ) print( " --envcutoff=CUTOFF - Lowpass filter cardiac normalization envelope to CUTOFF Hz (default is 0.4)" ) print( " --notchwidth=WIDTH - Set the width of the notch filter, in percent of the notch frequency" ) print(" (default is 1.5)") print("") print("External cardiac waveform options:") print( " --cardiacfile=FILE[:COL] - Read the cardiac waveform from file FILE. If COL is an integer," ) print( " and FILE is a text file, use the COL'th column. If FILE is a BIDS " ) print( " format json file, use column named COL. If no file is specified, " ) print(" estimate the cardiac signal from the fMRI data.") print( " --cardiacfreq=FREQ - Cardiac waveform in cardiacfile has sample frequency FREQ " ) print( " (default is 32Hz). 
NB: --cardiacfreq and --cardiactstep" ) print(" are two ways to specify the same thing") print( " --cardiactstep=TSTEP - Cardiac waveform in file has sample time step TSTEP " ) print( " (default is 0.03125s) NB: --cardiacfreq and --cardiactstep" ) print(" are two ways to specify the same thing") print( " --cardiacstart=START - The time delay in seconds into the cardiac file, corresponding" ) print(" in the first TR of the fMRI file (default is 0.0)") print( " --stdfreq=FREQ - Frequency to which the cardiac signals are resampled for output." ) print(" Default is 25.") print( " --forcehr=BPM - Force heart rate fundamental detector to be centered at BPM" ) print( " (overrides peak frequencies found from spectrum). Useful" ) print( " if there is structured noise that confuses the peak finder." ) print("") print("Phase projection tuning:") print(" --outputbins=BINS - Number of output phase bins (default is 32)") print( " --gridbins=BINS - Width of the gridding kernel in output phase bins (default is 3.0)" ) print( " --gridkernel=KERNEL - Convolution gridding kernel. Options are 'old', 'gauss', and 'kaiser'" ) print(" (default is 'kaiser')") print( " --projmask=MASKNAME - Phase projection will be restricted to voxels in MASKNAME" ) print(" (overrides normal intensity mask.)") print( " --projectwithraw - Use fMRI derived cardiac waveform as phase source for projection, even" ) print(" if a plethysmogram is supplied") print( " --fliparteries - Attempt to detect arterial signals and flip over the timecourses after" ) print( " phase projection (since relative arterial blood susceptibility is" ) print(" inverted relative to venous blood).") # print(" --arteriesonly - Restrict cardiac waveform estimation to putative arteries only.") print("") print("Debugging arguments (probably not of interest to users):") print( " --aliasedcorrelation - Attempt to calculate absolute delay using an aliased correlation (experimental)." ) print( " --noprogressbar - Disable progress bars - useful if saving output to files" ) print(" --debug - Turn on debugging information") print( " --increaseoutputlevel - Increase the output level to output more intermediate files (default=1)" ) print( " --decreaseoutputlevel - Decrease the output level to output fewer intermediate files (default=1)" ) print(" --nodetrend - Disable data detrending") print( " --noorthog - Disable orthogonalization of motion confound regressors" ) print(" --disablenotch - Disable subharmonic notch filter") print( " --nomask - Disable data masking for calculating cardiac waveform" ) print( " --nocensor - Bad points will not be excluded from analytic phase projection" ) print(" --noappsmooth - Disable smoothing app file in the phase direction") print( " --nophasefilt - Disable the phase trend filter (probably not a good idea)" ) print( " --nocardiacalign - Disable alignment of pleth signal to fMRI derived cardiac signal." ) print(" to blood vessels") print( " --saveinfoasjson - Save the info file in json format rather than text. Will eventually" ) print( " --trimcorrelations - Some physiological timecourses don't cover the entire length of the" ) print( " fMRI experiment. Use this option to trim other waveforms to match " ) print(" when calculating correlations.") print( " --saveintermediate - Save some data from intermediate passes to help debugging." 
) return () def rrifromphase(timeaxis, thephase): return None def cardiacsig(thisphase, amps=[1.0, 0.0, 0.0], phases=None, overallphase=0.0): total = 0.0 if phases is None: phases = amps * 0.0 for i in range(len(amps)): total += amps[i] * np.cos((i + 1) * thisphase + phases[i] + overallphase) return total def physiofromimage( normdata_byslice, mask_byslice, numslices, timepoints, tr, slicetimes, cardprefilter, respprefilter, notchpct=1.5, madnorm=True, nprocs=1, arteriesonly=False, fliparteries=False, debug=False, appflips_byslice=None, verbose=False, usemask=True, multiplicative=True, ): # find out what timepoints we have, and their spacing numsteps, minstep, sliceoffsets = tide_io.sliceinfo(slicetimes, tr) print( len(slicetimes), "slice times with", numsteps, "unique values - diff is", minstep, ) # make sure there is an appflips array if appflips_byslice is None: appflips_byslice = mask_byslice * 0.0 + 1.0 else: if arteriesonly: appflips_byslice[np.where(appflips_byslice > 0.0)] = 0.0 # make slice means print("making slice means...") hirestc = np.zeros((timepoints * numsteps), dtype=np.float64) cycleaverage = np.zeros((numsteps), dtype=np.float64) sliceavs = np.zeros((numslices, timepoints), dtype=np.float64) slicenorms = np.zeros((numslices), dtype=np.float64) if not verbose: print("averaging slices...") if fliparteries: thismask_byslice = appflips_byslice.astype(np.int64) * mask_byslice else: thismask_byslice = mask_byslice for theslice in range(numslices): if verbose: print("averaging slice", theslice) if usemask: validvoxels = np.where(np.abs(thismask_byslice[:, theslice]) > 0)[0] else: validvoxels = np.where(np.abs(thismask_byslice[:, theslice] >= 0))[0] if len(validvoxels) > 0: if madnorm: sliceavs[theslice, :], slicenorms[theslice] = tide_math.madnormalize( np.mean( normdata_byslice[validvoxels, theslice, :] * thismask_byslice[validvoxels, theslice, np.newaxis], axis=0, ), returnnormfac=True, ) else: sliceavs[theslice, :] = np.mean( normdata_byslice[validvoxels, theslice, :] * thismask_byslice[validvoxels, theslice, np.newaxis], axis=0, ) slicenorms[theslice] = 1.0 for t in range(timepoints): hirestc[numsteps * t + sliceoffsets[theslice]] += sliceavs[theslice, t] for i in range(numsteps): cycleaverage[i] = np.mean(hirestc[i:-1:numsteps]) for t in range(len(hirestc)): if multiplicative: hirestc[t] /= cycleaverage[t % numsteps] + 1.0 else: hirestc[t] -= cycleaverage[t % numsteps] if not verbose: print("done") slicesamplerate = 1.0 * numsteps / tr print("slice sample rate is ", slicesamplerate) # delete the TR frequency and the first subharmonic print("notch filtering...") filthirestc = tide_filt.harmonicnotchfilter( hirestc, slicesamplerate, 1.0 / tr, notchpct=notchpct, debug=debug ) # now get the cardiac and respiratory waveforms hirescardtc, cardnormfac = tide_math.madnormalize( cardprefilter.apply(slicesamplerate, filthirestc), returnnormfac=True ) hirescardtc *= -1.0 cardnormfac *= np.mean(slicenorms) hiresresptc, respnormfac = tide_math.madnormalize( respprefilter.apply(slicesamplerate, filthirestc), returnnormfac=True ) hiresresptc *= -1.0 respnormfac *= np.mean(slicenorms) return ( hirescardtc, cardnormfac, hiresresptc, respnormfac, slicesamplerate, numsteps, cycleaverage, slicenorms, ) def savgolsmooth(data, smoothlen=101, polyorder=3): return savgol_filter(data, smoothlen, polyorder) def getfundamental(inputdata, Fs, fundfreq): arb_lower = 0.71 * fundfreq arb_upper = 1.4 * fundfreq arb_lowerstop = 0.9 * arb_lower arb_upperstop = 1.1 * arb_upper thefundfilter = 
tide_filt.NoncausalFilter(filtertype="arb") thefundfilter.setfreqs(arb_lowerstop, arb_lower, arb_upper, arb_upperstop) return thefundfilter.apply(Fs, inputdata) def getcardcoeffs( cardiacwaveform, slicesamplerate, minhr=40.0, maxhr=140.0, smoothlen=101, debug=False, display=False, ): if len(cardiacwaveform) > 1024: thex, they = welch(cardiacwaveform, slicesamplerate, nperseg=1024) else: thex, they = welch(cardiacwaveform, slicesamplerate) initpeakfreq = np.round(thex[np.argmax(they)] * 60.0, 2) if initpeakfreq > maxhr: initpeakfreq = maxhr if initpeakfreq < minhr: initpeakfreq = minhr if debug: print("initpeakfreq:", initpeakfreq, "BPM") freqaxis, spectrum = tide_filt.spectrum( tide_filt.hamming(len(cardiacwaveform)) * cardiacwaveform, Fs=slicesamplerate, mode="complex", ) # remove any spikes at zero frequency minbin = int(minhr // (60.0 * (freqaxis[1] - freqaxis[0]))) maxbin = int(maxhr // (60.0 * (freqaxis[1] - freqaxis[0]))) spectrum[:minbin] = 0.0 spectrum[maxbin:] = 0.0 # find the max ampspec = savgolsmooth(np.abs(spectrum), smoothlen=smoothlen) if display: figure() plot(freqaxis, ampspec, "r") show() peakfreq = freqaxis[np.argmax(ampspec)] if debug: print("cardiac fundamental frequency is", np.round(peakfreq * 60.0, 2), "BPM") normfac = np.sqrt(2.0) * tide_math.rms(cardiacwaveform) if debug: print("normfac:", normfac) return peakfreq def normalizevoxels(fmri_data, detrendorder, validvoxels, time, timings, showprogressbar=False): print("normalizing voxels...") normdata = fmri_data * 0.0 demeandata = fmri_data * 0.0 starttime = time.time() # detrend if we are going to numspatiallocs = fmri_data.shape[0] reportstep = int(numspatiallocs // 100) if detrendorder > 0: print("detrending to order", detrendorder, "...") for idx, thevox in enumerate(validvoxels): if ((idx % reportstep == 0) or (idx == len(validvoxels) - 1)) and showprogressbar: tide_util.progressbar(idx + 1, len(validvoxels), label="Percent complete") fmri_data[thevox, :] = tide_fit.detrend( fmri_data[thevox, :], order=detrendorder, demean=False ) timings.append(["Detrending finished", time.time(), numspatiallocs, "voxels"]) print(" done") means = np.mean(fmri_data[:, :], axis=1).flatten() demeandata[validvoxels, :] = fmri_data[validvoxels, :] - means[validvoxels, None] normdata[validvoxels, :] = np.nan_to_num(demeandata[validvoxels, :] / means[validvoxels, None]) timings.append(["Normalization finished", time.time(), numspatiallocs, "voxels"]) print("normalization took", time.time() - starttime, "seconds") return normdata, demeandata, means def cleancardiac(Fs, plethwaveform, cutoff=0.4, thresh=0.2, nyquist=None, debug=False): # first bandpass the cardiac signal to calculate the envelope if debug: print("entering cleancardiac") plethfilter = tide_filt.NoncausalFilter("cardiac") print("filtering") print("envelope detection") envelope = tide_math.envdetect( Fs, tide_math.madnormalize(plethfilter.apply(Fs, tide_math.madnormalize(plethwaveform))), cutoff=cutoff, ) envmean = np.mean(envelope) # now patch the envelope function to eliminate very low values envlowerlim = thresh * np.max(envelope) envelope = np.where(envelope >= envlowerlim, envelope, envlowerlim) # now high pass the plethysmogram to eliminate baseline arb_lowerstop, arb_lowerpass, arb_upperpass, arb_upperstop = plethfilter.getfreqs() plethfilter.settype("arb") arb_upper = 10.0 arb_upperstop = arb_upper * 1.1 if nyquist is not None: if nyquist < arb_upper: arb_upper = nyquist arb_upperstop = nyquist plethfilter.setfreqs(arb_lowerstop, arb_lowerpass, arb_upperpass, 
arb_upperstop) filtplethwaveform = tide_math.madnormalize( plethfilter.apply(Fs, tide_math.madnormalize(plethwaveform)) ) print("normalizing") normpleth = tide_math.madnormalize(envmean * filtplethwaveform / envelope) # return the filtered waveform, the normalized waveform, and the envelope if debug: print("leaving cleancardiac") return filtplethwaveform, normpleth, envelope, envmean def findbadpts( thewaveform, nameroot, outputroot, samplerate, infodict, thetype="mad", retainthresh=0.89, mingap=2.0, outputlevel=0, debug=True, ): # if thetype == 'triangle' or thetype == 'mad': if thetype == "mad": absdev = np.fabs(thewaveform - np.median(thewaveform)) # if thetype == 'triangle': # thresh = threshold_triangle(np.reshape(absdev, (len(absdev), 1))) medianval = np.median(thewaveform) sigma = mad(thewaveform, center=medianval) numsigma = np.sqrt(1.0 / (1.0 - retainthresh)) thresh = numsigma * sigma thebadpts = np.where(absdev >= thresh, 1.0, 0.0) print( "bad point threshhold set to", thresh, "using the", thetype, "method for", nameroot, ) elif thetype == "fracval": lower, upper = tide_stats.getfracvals( thewaveform, [(1.0 - retainthresh) / 2.0, (1.0 + retainthresh) / 2.0], ) therange = upper - lower lowerthresh = lower - therange upperthresh = upper + therange thebadpts = np.where((lowerthresh <= thewaveform) & (thewaveform <= upperthresh), 0.0, 1.0) thresh = (lowerthresh, upperthresh) print( "values outside of ", lowerthresh, "to", upperthresh, "marked as bad using the", thetype, "method for", nameroot, ) else: print("bad thresholding type") sys.exit() # now fill in gaps streakthresh = int(np.round(mingap * samplerate)) lastbad = 0 if thebadpts[0] == 1.0: isbad = True else: isbad = False for i in range(1, len(thebadpts)): if thebadpts[i] == 1.0: if not isbad: # streak begins isbad = True if i - lastbad < streakthresh: thebadpts[lastbad:i] = 1.0 lastbad = i else: isbad = False if len(thebadpts) - lastbad - 1 < streakthresh: thebadpts[lastbad:] = 1.0 if outputlevel > 0: tide_io.writevec(thebadpts, outputroot + "_" + nameroot + "_badpts.txt") infodict[nameroot + "_threshvalue"] = thresh infodict[nameroot + "_threshmethod"] = thetype return thebadpts def approximateentropy(waveform, m, r): def _maxdist(x_i, x_j): return max([abs(ua - va) for ua, va in zip(x_i, x_j)]) def _phi(m): x = [[waveform[j] for j in range(i, i + m - 1 + 1)] for i in range(N - m + 1)] C = [len([1 for x_j in x if _maxdist(x_i, x_j) <= r]) / (N - m + 1.0) for x_i in x] return (N - m + 1.0) ** (-1) * sum(np.log(C)) N = len(waveform) return abs(_phi(m + 1) - _phi(m)) def entropy(waveform): return -np.sum(np.square(waveform) * np.nan_to_num(np.log2(np.square(waveform)))) def calcplethquality( waveform, Fs, infodict, suffix, outputroot, S_windowsecs=5.0, K_windowsecs=60.0, E_windowsecs=1.0, detrendorder=8, outputlevel=0, debug=False, ): """ Parameters ---------- waveform: array-like The cardiac waveform to be assessed Fs: float The sample rate of the data S_windowsecs: float Skewness window duration in seconds. Defaults to 5.0 (optimal for discrimination of "good" from "acceptable" and "unfit" according to Elgendi) K_windowsecs: float Skewness window duration in seconds. Defaults to 2.0 (after Selveraj) E_windowsecs: float Entropy window duration in seconds. Defaults to 0.5 (after Selveraj) detrendorder: int Order of detrending polynomial to apply to plethysmogram. 
debug: boolean Turn on extended output Returns ------- S_sqi_mean: float The mean value of the quality index over all time S_std_mean: float The standard deviation of the quality index over all time S_waveform: array The quality metric over all timepoints K_sqi_mean: float The mean value of the quality index over all time K_std_mean: float The standard deviation of the quality index over all time K_waveform: array The quality metric over all timepoints E_sqi_mean: float The mean value of the quality index over all time E_std_mean: float The standard deviation of the quality index over all time E_waveform: array The quality metric over all timepoints Calculates the windowed skewness, kurtosis, and entropy quality metrics described in Elgendi, M. "Optimal Signal Quality Index for Photoplethysmogram Signals". Bioengineering 2016, Vol. 3, Page 21 3, 21 (2016). """ # detrend the waveform dt_waveform = tide_fit.detrend(waveform, order=detrendorder, demean=True) # calculate S_sqi and K_sqi over a sliding window. Window size should be an odd number of points. S_windowpts = int(np.round(S_windowsecs * Fs, 0)) S_windowpts += 1 - S_windowpts % 2 S_waveform = dt_waveform * 0.0 K_windowpts = int(np.round(K_windowsecs * Fs, 0)) K_windowpts += 1 - K_windowpts % 2 K_waveform = dt_waveform * 0.0 E_windowpts = int(np.round(E_windowsecs * Fs, 0)) E_windowpts += 1 - E_windowpts % 2 E_waveform = dt_waveform * 0.0 if debug: print("S_windowsecs, S_windowpts:", S_windowsecs, S_windowpts) print("K_windowsecs, K_windowpts:", K_windowsecs, K_windowpts) print("E_windowsecs, E_windowpts:", E_windowsecs, E_windowpts) for i in range(0, len(dt_waveform)): startpt = np.max([0, i - S_windowpts // 2]) endpt = np.min([i + S_windowpts // 2, len(dt_waveform)]) S_waveform[i] = skew(dt_waveform[startpt : endpt + 1], nan_policy="omit") startpt = np.max([0, i - K_windowpts // 2]) endpt = np.min([i + K_windowpts // 2, len(dt_waveform)]) K_waveform[i] = kurtosis(dt_waveform[startpt : endpt + 1], fisher=False) startpt = np.max([0, i - E_windowpts // 2]) endpt = np.min([i + E_windowpts // 2, len(dt_waveform)]) # E_waveform[i] = entropy(dt_waveform[startpt:endpt + 1]) r = 0.2 * np.std(dt_waveform[startpt : endpt + 1]) E_waveform[i] = approximateentropy(dt_waveform[startpt : endpt + 1], 2, r) if debug: print( i, startpt, endpt, endpt - startpt + 1, S_waveform[i], K_waveform[i], E_waveform[i], ) S_sqi_mean = np.mean(S_waveform) S_sqi_std = np.std(S_waveform) K_sqi_mean = np.mean(K_waveform) K_sqi_std = np.std(K_waveform) E_sqi_mean = np.mean(E_waveform) E_sqi_std = np.std(E_waveform) infodict["S_sqi_mean" + suffix] = S_sqi_mean infodict["S_sqi_std" + suffix] = S_sqi_std infodict["K_sqi_mean" + suffix] = K_sqi_mean infodict["K_sqi_std" + suffix] = K_sqi_std infodict["E_sqi_mean" + suffix] = E_sqi_mean infodict["E_sqi_std" + suffix] = E_sqi_std if outputlevel > 1: tide_io.writevec(S_waveform, outputroot + suffix + "_S_sqi_" + str(Fs) + "Hz.txt") tide_io.writevec(K_waveform, outputroot + suffix + "_K_sqi_" + str(Fs) + "Hz.txt") tide_io.writevec(E_waveform, outputroot + suffix + "_E_sqi_" + str(Fs) + "Hz.txt") def getphysiofile( cardiacfile, colnum, colname, inputfreq, inputstart, slicetimeaxis, stdfreq, envcutoff, envthresh, timings, infodict, outputroot, outputlevel=0, debug=False, ): if debug: print("entering getphysiofile") print("reading cardiac signal from file") infodict["cardiacfromfmri"] = False # check file type filebase, extension = os.path.splitext(cardiacfile) if debug: print("filebase:", filebase) print("extension:", extension) 
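# A minimal sketch of how the approximate-entropy quality metric behaves, assuming
# the approximateentropy() function defined above is in scope. The sample rate,
# duration, and signals below are illustrative toy values, not pipeline settings:
# a quasi-periodic waveform scores much lower (more regular) than noise, which is
# why lower E_sqi values indicate a cleaner plethysmogram.
import numpy as np

fs = 25.0                                     # assumed sample rate, Hz
t = np.arange(0.0, 4.0, 1.0 / fs)
clean = np.sin(2.0 * np.pi * 1.0 * t)         # regular, "good quality" waveform
noisy = np.random.randn(len(t))               # irregular, "poor quality" waveform

for label, sig in (("clean", clean), ("noisy", noisy)):
    r = 0.2 * np.std(sig)                     # same tolerance rule as the E_sqi loop
    print(label, "ApEn:", approximateentropy(sig, 2, r))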
if extension == ".json": inputfreq, inputstart, pleth_fullres = tide_io.readcolfrombidstsv( cardiacfile, columnname=colname, columnnum=colnum, debug=debug ) else: pleth_fullres = np.transpose(tide_io.readvecs(cardiacfile)) print(pleth_fullres.shape) if len(pleth_fullres.shape) != 1: pleth_fullres = pleth_fullres[:, colnum].flatten() if debug: print("inputfreq:", inputfreq) print("inputstart:", inputstart) print("pleth_fullres:", pleth_fullres) inputtimeaxis = ( sp.arange(0.0, (1.0 / inputfreq) * len(pleth_fullres), 1.0 / inputfreq) + inputstart ) if inputtimeaxis[0] > 0.0 or inputtimeaxis[-1] < slicetimeaxis[-1]: print("getphysiofile: error - plethysmogram waveform does not cover the fmri time range") sys.exit() if debug: print("pleth_fullres: len=", len(pleth_fullres), "vals=", pleth_fullres) print("inputfreq =", inputfreq) print("inputstart =", inputstart) print("inputtimeaxis: len=", len(inputtimeaxis), "vals=", inputtimeaxis) timings.append(["Cardiac signal from physiology data read in", time.time(), None, None]) # filter and amplitude correct the waveform to remove gain fluctuations cleanpleth_fullres, normpleth_fullres, plethenv_fullres, envmean = cleancardiac( inputfreq, pleth_fullres, cutoff=envcutoff, thresh=envthresh, nyquist=inputfreq / 2.0, debug=debug, ) infodict["plethsamplerate"] = inputfreq infodict["numplethpts_fullres"] = len(pleth_fullres) if outputlevel > 1: tide_io.writevec(pleth_fullres, outputroot + "_rawpleth_native.txt") tide_io.writevec(cleanpleth_fullres, outputroot + "_pleth_native.txt") tide_io.writevec(plethenv_fullres, outputroot + "_cardenvelopefromfile_native.txt") timings.append(["Cardiac signal from physiology data cleaned", time.time(), None, None]) # resample to slice time resolution and save pleth_sliceres = tide_resample.doresample( inputtimeaxis, cleanpleth_fullres, slicetimeaxis, method="univariate", padlen=0 ) infodict["numplethpts_sliceres"] = len(pleth_sliceres) # resample to standard resolution and save pleth_stdres = tide_math.madnormalize( tide_resample.arbresample( cleanpleth_fullres, inputfreq, stdfreq, decimate=True, debug=False ) ) infodict["numplethpts_stdres"] = len(pleth_stdres) timings.append( [ "Cardiac signal from physiology data resampled to slice resolution and saved", time.time(), None, None, ] ) if debug: print("leaving getphysiofile") return pleth_sliceres, pleth_stdres def readextmask(thefilename, nim_hdr, xsize, ysize, numslices): ( extmask, extmask_data, extmask_hdr, theextmaskdims, theextmasksizes, ) = tide_io.readfromnifti(thefilename) ( xsize_extmask, ysize_extmask, numslices_extmask, timepoints_extmask, ) = tide_io.parseniftidims(theextmaskdims) if not tide_io.checkspacematch(nim_hdr, extmask_hdr): print("Dimensions of mask do not match the fmri data - exiting") sys.exit() if timepoints_extmask > 1: print("Mask must have only 3 dimensions - exiting") sys.exit() return extmask_data.reshape(xsize * ysize, numslices) def checkcardmatch(reference, candidate, samplerate, refine=True, debug=False): """ Parameters ---------- reference: 1D numpy array The cardiac waveform to compare to candidate: 1D numpy array The cardiac waveform to be assessed samplerate: float The sample rate of the data in Hz refine: bool, optional Whether to refine the peak fit. Default is True. debug: bool, optional Output additional information for debugging Returns ------- maxval: float The maximum value of the crosscorrelation function maxdelay: float The time, in seconds, where the maximum crosscorrelation occurs. 
failreason: flag Reason why the fit failed (0 if no failure) """ thecardfilt = tide_filt.NoncausalFilter(filtertype="cardiac") trimlength = np.min([len(reference), len(candidate)]) thexcorr = tide_corr.fastcorrelate( tide_math.corrnormalize( thecardfilt.apply(samplerate, reference), detrendorder=3, windowfunc="hamming", )[:trimlength], tide_math.corrnormalize( thecardfilt.apply(samplerate, candidate), detrendorder=3, windowfunc="hamming", )[:trimlength], usefft=True, ) xcorrlen = len(thexcorr) sampletime = 1.0 / samplerate xcorr_x = np.r_[0.0:xcorrlen] * sampletime - (xcorrlen * sampletime) / 2.0 + sampletime / 2.0 searchrange = 5.0 trimstart = tide_util.valtoindex(xcorr_x, -2.0 * searchrange) trimend = tide_util.valtoindex(xcorr_x, 2.0 * searchrange) ( maxindex, maxdelay, maxval, maxsigma, maskval, failreason, peakstart, peakend, ) = tide_fit.findmaxlag_gauss( xcorr_x[trimstart:trimend], thexcorr[trimstart:trimend], -searchrange, searchrange, 3.0, refine=refine, zerooutbadfit=False, useguess=False, fastgauss=False, displayplots=False, ) if debug: print( "CORRELATION: maxindex, maxdelay, maxval, maxsigma, maskval, failreason, peakstart, peakend:", maxindex, maxdelay, maxval, maxsigma, maskval, failreason, peakstart, peakend, ) return maxval, maxdelay, failreason def cardiaccycleaverage( sourcephases, destinationphases, waveform, procpoints, congridbins, gridkernel, centric, cyclic=True, ): rawapp_bypoint = np.zeros(len(destinationphases), dtype=np.float64) weight_bypoint = np.zeros(len(destinationphases), dtype=np.float64) for t in procpoints: thevals, theweights, theindices = tide_resample.congrid( destinationphases, tide_math.phasemod(sourcephases[t], centric=centric), 1.0, congridbins, kernel=gridkernel, cyclic=cyclic, ) for i in range(len(theindices)): weight_bypoint[theindices[i]] += theweights[i] rawapp_bypoint[theindices[i]] += theweights[i] * waveform[t] rawapp_bypoint = np.where( weight_bypoint > np.max(weight_bypoint) / 50.0, np.nan_to_num(rawapp_bypoint / weight_bypoint), 0.0, ) minval = np.min(rawapp_bypoint[np.where(weight_bypoint > np.max(weight_bypoint) / 50.0)]) rawapp_bypoint = np.where( weight_bypoint > np.max(weight_bypoint) / 50.0, rawapp_bypoint - minval, 0.0 ) return rawapp_bypoint def circularderivs(timecourse): firstderiv = np.diff(timecourse, append=[timecourse[0]]) return ( np.max(firstderiv), np.argmax(firstderiv), np.min(firstderiv), np.argmin(firstderiv), ) def findphasecuts(phases): max_peaks = [] min_peaks = [] thisval = phases[0] for i in range(1, len(phases)): if thisval - phases[i] > np.pi: max_peaks.append([i - 1, thisval]) min_peaks.append([i, phases[i]]) thisval = phases[i] return max_peaks, min_peaks def happy_main(thearguments): # get the command line parameters debug = False centric = True savetcsastsv = False histlen = 100 smoothlen = 101 envcutoff = 0.4 envthresh = 0.2 maskthreshpct = 10.0 upsamplefac = 100 destpoints = 32 congridbins = 3.0 gridkernel = "kaiser" cardiacfilename = None colnum = None colname = None inputfreq = 32.0 inputstart = 0.0 dospatialglm = False dotemporalglm = False notchpct = 1.5 minhr = 40.0 maxhr = 140.0 minhrfilt = 40.0 maxhrfilt = 1000.0 softvesselfrac = 0.4 infodict = {} stdfreq = 25.0 nprocs = 1 mklthreads = 1 savecardiacnoise = True forcedhr = None usemaskcardfromfmri = True censorbadpts = True estmaskname = None projmaskname = None detrendorder = 3 filtphase = True savemotionglmfilt = False motionfilename = None cardcalconly = False domadnorm = True numskip = 0 motskip = 0 dodlfilter = False modelname = 
"model_revised" motionhp = None motionlp = None motfilt_pos = False motfilt_deriv = True motfilt_derivdelayed = True orthogonalize = True mpfix = False aligncardiac = True projectwithraw = False saveinfoasjson = False outputlevel = 1 verbose = False smoothapp = True unnormvesselmap = True fliparteries = False pulsereconstepsize = 0.01 arteriesonly = False saveintermediate = False showprogressbar = True doaliasedcorrelation = False aliasedcorrelationwidth = 1.25 aliasedcorrelationpts = 101 # start the clock! timings = [["Start", time.time(), None, None]] """print( "***********************************************************************************************************************************") print("NOTICE: This program is NOT released yet - it's a work in progress and is nowhere near done. That's why") print("there's no documentation or mention in the release notes. If you want to play with it, be my guest, but be") print("aware of the following:") print(" 1) Any given version of this program may or may not work, or may work in a way different than ") print(" a) previous versions, b) what I say it does, c) what I think it does, and d) what you want it to do.") print( " 2) I am intending to write a paper on this, and if you take this code and scoop me, I'll be peeved. That's just rude.") print(" 3) For all I know this program might burn down your house, leave your milk out of the refrigerator, or ") print(" poison your dog. USE AT YOUR OWN RISK.") print( "***********************************************************************************************************************************") print("")""" fmrifilename = thearguments[1] slicetimename = thearguments[2] outputroot = thearguments[3] infodict["fmrifilename"] = fmrifilename infodict["slicetimename"] = slicetimename infodict["outputroot"] = outputroot # save program version ( infodict["release_version"], infodict["git_longtag"], infodict["git_date"], infodict["git_isdirty"], ) = tide_util.version() # record the machine we ran on infodict["hostname"] = platform.node() print("running version", infodict["release_version"], "on host", infodict["hostname"]) optparsestart = 4 # now scan for optional arguments try: opts, args = getopt.getopt( thearguments[optparsestart:], "x", [ "cardiacfile=", "cardiacfreq=", "cardiactstep=", "cardiacstart=", "maxhr=", "minhr=", "maxhrfilt=", "minhrfilt=", "envcutoff=", "envthresh=", "notchwidth=", "disablenotch", "nodetrend", "motionfile=", "spatialglm", "temporalglm", "debug", "motionhp=", "motionlp=", "cardcalconly", "outputbins=", "gridbins=", "gridkernel=", "stdfreq=", "nprocs=", "mklthreads=", "arteriesonly", "estmask=", "projmask=", "smoothlen=", "forcehr=", "numskip=", "motskip=", "nocensor", "noappsmooth", "nomadnorm", "dodlfilter", "noncentric", "model=", "noprogressbar", "usesuperdangerousworkaround", "saveintermediate", "savemotionglmfilt", "saveinfoasjson", "savetcsastsv", "nophasefilt", "projectwithraw", "trimcorrelations", "fliparteries", "nomask", "noorthog", "nocardiacalign", "nomotderiv", "nomotderivdelayed", "increaseoutputlevel", "decreaseoutputlevel", "aliasedcorrelation", "help", ], ) except getopt.GetoptError as err: # print help information and exit: print(str(err)) # will print something like "option -x not recognized" usage() sys.exit(2) formattedcmdline = [thearguments[0] + " \\"] for thearg in range(1, optparsestart): formattedcmdline.append("\t" + thearguments[thearg] + " \\") for o, a in opts: linkchar = " " if o == "-x": print("Got an x") elif o == "--motionfile": 
motionfilename = a print("Will regress motion out of data prior to analysis") elif o == "--saveintermediate": saveintermediate = True print("Will save some data from intermediate passes") elif o == "--aliasedcorrelation": doaliasedcorrelation = True print("Will get absolute cardiac delay using aliased correlation function") elif o == "--arteriesonly": arteriesonly = True print("Will only use arterial blood for generating cardiac waveform") elif o == "--spatialglm": dospatialglm = True print("Will generate and remove cardiac signal maps for each timepoint") elif o == "--temporalglm": dotemporalglm = True print("Will generate and remove cardiac timecourses from each voxel") elif o == "--disablenotch": notchpct = None print("Disabling subharmonic notch filter") elif o == "--nodetrend": detrendorder = 0 print("Will disable data detrending") elif o == "--debug": debug = True print("Extended debugging messages") elif o == "--increaseoutputlevel": outputlevel += 1 print("Increased output level to", outputlevel) elif o == "--decreaseoutputlevel": outputlevel -= 1 if outputlevel < 0: outputlevel = 0 print("Decreased output level to", outputlevel) elif o == "--savemotionglmfilt": savemotionglmfilt = True elif o == "--fliparteries": fliparteries = True print("Will detect and invert arterial timecourses.") elif o == "--nophasefilt": filtphase = False print("Disabling phase trend filter") elif o == "--nocardiacalign": aligncardiac = False print("Disabling cardiac alignment") elif o == "--noncentric": centric = False print("Performing noncentric projection") elif o == "--dodlfilter": if dlfilterexists: dodlfilter = True print("Will apply deep learning filter to enhance the cardiac waveforms") else: print( "dlfilter not found - check to make sure Keras is installed and working. Disabling." 
) elif o == "--model": linkchar = "=" modelname = a print("Will use", modelname, "for the deep learning filter;") elif o == "--noprogressbar": showprogressbar = False print("Will disable progress bars") elif o == "--cardcalconly": cardcalconly = True print("Will stop processing after calculating cardiac waveforms") elif o == "--noappsmooth": smoothapp = False print("Will not smooth projection along phase direction") elif o == "--nomask": usemaskcardfromfmri = False elif o == "--nocensor": censorbadpts = False print("Will not censor bad points") elif o == "--projectwithraw": projectwithraw = True print("Will use fmri derived cardiac waveform as phase source for projection") elif o == "--nomadnorm": domadnorm = False print("Disabling MAD normalization between slices") elif o == "--outputbins": linkchar = "=" destpoints = int(a) print("Will use", destpoints, "output bins") elif o == "--numskip": linkchar = "=" numskip = int(a) print("Skipping first", numskip, "fmri trs") elif o == "--motskip": linkchar = "=" motskip = int(a) print("Skipping first", motskip, "motion trs") elif o == "--smoothlen": linkchar = "=" smoothlen = int(a) smoothlen = smoothlen + (1 - smoothlen % 2) print("Will set savitsky-golay window to", smoothlen) elif o == "--gridbins": linkchar = "=" congridbins = float(a) print("Will use a convolution gridding kernel of width", congridbins, "bins") elif o == "--gridkernel": linkchar = "=" gridkernel = a if gridkernel == "kaiser": print("Will use a kaiser-bessel gridding kernel") elif gridkernel == "gauss": print("Will use a gaussian gridding kernel") elif gridkernel == "old": print("Falling back to old style gridding") else: print("Illegal gridding kernel specified - aborting") sys.exit() elif o == "--usesuperdangerousworkaround": mpfix = True print("Trying super dangerous workaround to make dlfilter work") elif o == "--notchwidth": linkchar = "=" notchpct = float(a) print("Setting notchwidth to", notchpct, "%") elif o == "--nprocs": linkchar = "=" nprocs = int(a) if nprocs < 1: nprocs = tide_multiproc.maxcpus() print("Will use", nprocs, "processors for long calculations") elif o == "--mklthreads": mklthreads = int(a) linkchar = "=" if mklexists: mklmaxthreads = mkl.get_max_threads() if mklthreads > mklmaxthreads: print("mkl max threads =", mklmaxthreads, " - using max") mklthreads = mklmaxthreads print( "Will use", mklthreads, "MKL threads for accelerated numpy processing.", ) else: print("MKL not present - ignoring --mklthreads") elif o == "--stdfreq": linkchar = "=" stdfreq = float(a) print("Setting common output frequency to", stdfreq) elif o == "--envcutoff": linkchar = "=" envcutoff = float(a) print("Will set top of cardiac envelope band to", envcutoff) elif o == "--envthresh": linkchar = "=" envthresh = float(a) print( "Will set lowest value of cardiac envelope band to", envthresh, "x the maximum value", ) elif o == "--minhr": newval = float(a) print( "Will set bottom of cardiac search range to", newval, "BPM from", minhr, "BPM", ) minhr = newval elif o == "--maxhr": linkchar = "=" newval = float(a) print( "Will set top of cardiac search range to", newval, "BPM from", maxhr, "BPM", ) maxhr = newval elif o == "--minhrfilt": linkchar = "=" newval = float(a) print( "Will set bottom of cardiac band to", newval, "BPM from", minhrfilt, "BPM when estimating waveform", ) minhrfilt = newval elif o == "--maxhrfilt": linkchar = "=" newval = float(a) print( "Will set top of cardiac band to", newval, "BPM from", maxhrfilt, "BPM when estimating waveform", ) maxhrfilt = newval elif o == 
"--forcehr": linkchar = "=" forcedhr = float(a) / 60.0 print("Force heart rate detector to", forcedhr * 60.0, "BPM") elif o == "--motionhp": linkchar = "=" motionhp = float(a) print("Will highpass motion regressors at", motionhp, "Hz prior to regression") elif o == "--motionlp": linkchar = "=" motionlp = float(a) print("Will lowpass motion regressors at", motionlp, "Hz prior to regression") elif o == "--savetcsastsv": savetcsastsv = True print("Will save timecourses in BIDS tsv format") elif o == "--saveinfoasjson": saveinfoasjson = True print("Will save info file in json format") elif o == "--trimcorrelations": trimcorrelations = True print("Will be tolerant of short physiological timecourses") elif o == "--noorthog": orthogonalize = False print("Will not orthogonalize motion regressors") elif o == "--nomotderivdelayed": motfilt_derivdelayed = False print("Will not use motion position regressors") elif o == "--nomotderiv": motfilt_deriv = False print("Will not use motion derivative regressors") elif o == "--estmask": estmaskname = a usemaskcardfromfmri = True print("Will restrict cardiac waveform fit to voxels in", estmaskname) elif o == "--projmask": projmaskname = a useintensitymask = False usemaskcardfromfmri = True print("Will restrict phase projection to voxels in", projmaskname) elif o == "--cardiacfile": linkchar = "=" inputlist = a.split(":") cardiacfilename = inputlist[0] if len(inputlist) > 1: try: colnum = int(inputlist[1]) except ValueError: colname = inputlist[1] print("Will use cardiac file", cardiacfilename) elif o == "--cardiacfreq": linkchar = "=" inputfreq = float(a) print("Setting cardiac sample frequency to ", inputfreq) elif o == "--cardiactstep": linkchar = "=" inputfreq = 1.0 / float(a) print("Setting cardiac sample time step to ", float(a)) elif o == "--cardiacstart": linkchar = "=" inputstart = float(a) print("Setting cardiac start time to ", inputstart) elif o == "--help": usage() sys.exit() else: assert False, "unhandled option: " + o formattedcmdline.append("\t" + o + linkchar + a + " \\") formattedcmdline[len(formattedcmdline) - 1] = formattedcmdline[len(formattedcmdline) - 1][:-2] # write out the command used tide_util.savecommandline(thearguments, outputroot) tide_io.writevec(formattedcmdline, outputroot + "_formattedcommandline.txt") memfile = open(outputroot + "_memusage.csv", "w") tide_util.logmem(None, file=memfile) # set the number of MKL threads to use if mklexists: mkl.set_num_threads(mklthreads) # if we are going to do a glm, make sure we are generating app matrix if (dotemporalglm or dospatialglm) and cardcalconly: print("doing glm fit requires phase projection - setting cardcalconly to False") cardcalconly = False # save important configuration options infodict["aliasedcorrelationpts"] = aliasedcorrelationpts infodict["aliasedcorrelationwidth"] = aliasedcorrelationwidth infodict["aligncardiac"] = aligncardiac infodict["arteriesonly"] = arteriesonly infodict["cardcalconly"] = cardcalconly infodict["cardiacfilename"] = cardiacfilename infodict["censorbadpts"] = censorbadpts infodict["centric"] = centric infodict["colname"] = colname infodict["colnum"] = colnum infodict["congridbins"] = congridbins infodict["debug"] = debug infodict["destpoints"] = destpoints infodict["detrendorder"] = detrendorder infodict["doaliasedcorrelation"] = doaliasedcorrelation infodict["dodlfilter"] = dodlfilter infodict["domadnorm"] = domadnorm infodict["dospatialglm"] = dospatialglm infodict["dotemporalglm"] = dotemporalglm infodict["envcutoff"] = envcutoff 
infodict["envthresh"] = envthresh infodict["filtphase"] = filtphase infodict["fliparteries"] = fliparteries infodict["forcedhr"] = forcedhr infodict["gridkernel"] = gridkernel infodict["histlen"] = histlen infodict["inputfreq"] = inputfreq infodict["inputstart"] = inputstart infodict["maskthreshpct"] = maskthreshpct infodict["maxhr"] = maxhr infodict["maxhrfilt"] = maxhrfilt infodict["minhr"] = minhr infodict["minhrfilt"] = minhrfilt infodict["mklthreads"] = mklthreads infodict["modelname"] = modelname infodict["motionfilename"] = motionfilename infodict["motionhp"] = motionhp infodict["motionlp"] = motionlp infodict["motskip"] = motskip infodict["mpfix"] = mpfix infodict["nprocs"] = nprocs infodict["numskip"] = numskip infodict["orthogonalize"] = orthogonalize infodict["outputlevel"] = outputlevel infodict["projectwithraw"] = projectwithraw infodict["projmaskname"] = projmaskname infodict["pulsereconstepsize"] = pulsereconstepsize infodict["savecardiacnoise"] = savecardiacnoise infodict["saveinfoasjson"] = saveinfoasjson infodict["saveintermediate"] = saveintermediate infodict["showprogressbar"] = showprogressbar infodict["smoothapp"] = smoothapp infodict["smoothlen"] = smoothlen infodict["softvesselfrac"] = softvesselfrac infodict["stdfreq"] = stdfreq infodict["unnormvesselmap"] = unnormvesselmap infodict["upsamplefac"] = upsamplefac infodict["usemaskcardfromfmri"] = usemaskcardfromfmri infodict["verbose"] = verbose # set up cardiac filter arb_lower = minhrfilt / 60.0 arb_upper = maxhrfilt / 60.0 thecardbandfilter = tide_filt.NoncausalFilter() thecardbandfilter.settype("arb") arb_lowerstop = arb_lower * 0.9 arb_upperstop = arb_upper * 1.1 thecardbandfilter.setfreqs(arb_lowerstop, arb_lower, arb_upper, arb_upperstop) therespbandfilter = tide_filt.NoncausalFilter() therespbandfilter.settype("resp") infodict["filtermaxbpm"] = arb_upper * 60.0 infodict["filterminbpm"] = arb_lower * 60.0 infodict["notchpct"] = notchpct timings.append(["Argument parsing done", time.time(), None, None]) # read in the image data tide_util.logmem("before reading in fmri data", file=memfile) nim, nim_data, nim_hdr, thedims, thesizes = tide_io.readfromnifti(fmrifilename) input_data = tide_classes.fMRIDataset(nim_data, numskip=numskip) timepoints = input_data.timepoints xsize = input_data.xsize ysize = input_data.ysize numslices = input_data.numslices xdim, ydim, slicethickness, tr = tide_io.parseniftisizes(thesizes) spaceunit, timeunit = nim_hdr.get_xyzt_units() if timeunit == "msec": tr /= 1000.0 mrsamplerate = 1.0 / tr print("tr is", tr, "seconds, mrsamplerate is", mrsamplerate) numspatiallocs = int(xsize) * int(ysize) * int(numslices) infodict["tr"] = tr infodict["mrsamplerate"] = mrsamplerate timings.append(["Image data read in", time.time(), None, None]) # remap to space by time fmri_data = input_data.byvol() del nim_data # make and save a mask of the voxels to process based on image intensity tide_util.logmem("before mask creation", file=memfile) mask = np.uint16( tide_stats.makemask(np.mean(fmri_data[:, :], axis=1), threshpct=maskthreshpct) ) validvoxels = np.where(mask > 0)[0] theheader = copy.deepcopy(nim_hdr) theheader["dim"][4] = 1 timings.append(["Mask created", time.time(), None, None]) if outputlevel > 0: tide_io.savetonifti( mask.reshape((xsize, ysize, numslices)), theheader, outputroot + "_mask" ) timings.append(["Mask saved", time.time(), None, None]) mask_byslice = mask.reshape((xsize * ysize, numslices)) # read in projection mask if present otherwise fall back to intensity mask if projmaskname is 
not None: tide_util.logmem("before reading in projmask", file=memfile) projmask_byslice = readextmask( projmaskname, nim_hdr, xsize, ysize, numslices ) * np.float64(mask_byslice) else: projmask_byslice = mask_byslice # filter out motion regressors here if motionfilename is not None: timings.append(["Motion filtering start", time.time(), None, None]) (motionregressors, motionregressorlabels, filtereddata,) = tide_glmpass.motionregress( motionfilename, fmri_data[validvoxels, :], tr, orthogonalize=orthogonalize, motstart=motskip, motionhp=motionhp, motionlp=motionlp, position=motfilt_pos, deriv=motfilt_deriv, derivdelayed=motfilt_derivdelayed, ) fmri_data[validvoxels, :] = filtereddata[:, :] infodict["numorthogmotregressors"] = motionregressors.shape[0] timings.append(["Motion filtering end", time.time(), numspatiallocs, "voxels"]) tide_io.writenpvecs(motionregressors, outputroot + "_orthogonalizedmotion.txt") if savemotionglmfilt: tide_io.savetonifti( fmri_data.reshape((xsize, ysize, numslices, timepoints)), theheader, outputroot + "_motionfiltered", ) timings.append(["Motion filtered data saved", time.time(), numspatiallocs, "voxels"]) # get slice times slicetimes = tide_io.getslicetimesfromfile(slicetimename) timings.append(["Slice times determined", time.time(), None, None]) # normalize the input data tide_util.logmem("before normalization", file=memfile) normdata, demeandata, means = normalizevoxels( fmri_data, detrendorder, validvoxels, time, timings, showprogressbar=showprogressbar, ) normdata_byslice = normdata.reshape((xsize * ysize, numslices, timepoints)) # read in estimation mask if present. Otherwise, otherwise use intensity mask. infodict["estmaskname"] = estmaskname if debug: print(estmaskname) if estmaskname is not None: tide_util.logmem("before reading in estmask", file=memfile) estmask_byslice = readextmask(estmaskname, nim_hdr, xsize, ysize, numslices) * np.float64( mask_byslice ) print("using estmask from file", estmaskname) numpasses = 1 else: # just fall back to the intensity mask estmask_byslice = mask_byslice.astype("float64") numpasses = 2 print("not using separate estimation mask - doing initial estimate using intensity mask") if fliparteries: # add another pass to refine the waveform after getting the new appflips numpasses += 1 print("adding a pass to regenerate cardiac waveform using bettter appflips") infodict["numpasses"] = numpasses # if we have an estimation mask, run procedure once. If not, run once to get a vessel mask, then rerun. 
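# A compact sketch of the per-voxel normalization performed by normalizevoxels()
# above: each voxel timecourse is demeaned and divided by its mean, so the phase
# projection works on fractional signal change. Toy array sizes and values only.
import numpy as np

rng = np.random.default_rng(0)
fmri_toy = 1000.0 + 5.0 * rng.standard_normal((3, 20))     # 3 voxels, 20 timepoints
means_toy = np.mean(fmri_toy, axis=1)
demean_toy = fmri_toy - means_toy[:, None]
norm_toy = np.nan_to_num(demean_toy / means_toy[:, None])
print(norm_toy.std(axis=1))                                 # roughly 0.005, i.e. ~0.5% signal change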
appflips_byslice = None for thispass in range(numpasses): if numpasses > 1: print() print() print("starting pass", thispass + 1, "of", numpasses) passstring = " - pass " + str(thispass + 1) else: passstring = "" # now get an estimate of the cardiac signal print("estimating cardiac signal from fmri data") tide_util.logmem("before cardiacfromimage", file=memfile) ( cardfromfmri_sliceres, cardfromfmri_normfac, respfromfmri_sliceres, respfromfmri_normfac, slicesamplerate, numsteps, cycleaverage, slicenorms, ) = physiofromimage( normdata_byslice, estmask_byslice, numslices, timepoints, tr, slicetimes, thecardbandfilter, therespbandfilter, madnorm=domadnorm, nprocs=nprocs, notchpct=notchpct, fliparteries=fliparteries, arteriesonly=arteriesonly, usemask=usemaskcardfromfmri, appflips_byslice=appflips_byslice, debug=debug, verbose=verbose, ) timings.append( [ "Cardiac signal generated from image data" + passstring, time.time(), None, None, ] ) infodict["cardfromfmri_normfac"] = cardfromfmri_normfac slicetimeaxis = np.linspace( 0.0, tr * timepoints, num=(timepoints * numsteps), endpoint=False ) if thispass == numpasses - 1: tide_io.writevec(cycleaverage, outputroot + "_cycleaverage.txt") tide_io.writevec(cardfromfmri_sliceres, outputroot + "_cardfromfmri_sliceres.txt") else: if saveintermediate: tide_io.writevec( cycleaverage, outputroot + "_cycleaverage_pass" + str(thispass + 1) + ".txt", ) tide_io.writevec( cardfromfmri_sliceres, outputroot + "_cardfromfmri_sliceres_pass" + str(thispass + 1) + ".txt", ) # stash away a copy of the waveform if we need it later raw_cardfromfmri_sliceres = np.array(cardfromfmri_sliceres) # find bad points in cardiac from fmri thebadcardpts = findbadpts( cardfromfmri_sliceres, "cardfromfmri_sliceres", outputroot, slicesamplerate, infodict, ) cardiacwaveform = np.array(cardfromfmri_sliceres) badpointlist = np.array(thebadcardpts) infodict["slicesamplerate"] = slicesamplerate infodict["numcardpts_sliceres"] = timepoints * numsteps infodict["numsteps"] = numsteps infodict["slicenorms"] = slicenorms # find key components of cardiac waveform print("extracting harmonic components") if outputlevel > 1: if thispass == numpasses - 1: tide_io.writevec( cardfromfmri_sliceres * (1.0 - thebadcardpts), outputroot + "_cardfromfmri_sliceres_censored.txt", ) peakfreq_bold = getcardcoeffs( (1.0 - thebadcardpts) * cardiacwaveform, slicesamplerate, minhr=minhr, maxhr=maxhr, smoothlen=smoothlen, debug=debug, ) infodict["cardiacbpm_bold"] = np.round(peakfreq_bold * 60.0, 2) infodict["cardiacfreq_bold"] = peakfreq_bold timings.append( [ "Cardiac signal from image data analyzed" + passstring, time.time(), None, None, ] ) # resample to standard frequency cardfromfmri_stdres = tide_math.madnormalize( tide_resample.arbresample( cardfromfmri_sliceres, slicesamplerate, stdfreq, decimate=True, debug=False, ) ) if thispass == numpasses - 1: tide_io.writevec( cardfromfmri_stdres, outputroot + "_cardfromfmri_" + str(stdfreq) + "Hz.txt", ) else: if saveintermediate: tide_io.writevec( cardfromfmri_stdres, outputroot + "_cardfromfmri_" + str(stdfreq) + "Hz_pass" + str(thispass + 1) + ".txt", ) infodict["numcardpts_stdres"] = len(cardfromfmri_stdres) # normalize the signal to remove envelope effects ( filtcardfromfmri_stdres, normcardfromfmri_stdres, cardfromfmrienv_stdres, envmean, ) = cleancardiac( stdfreq, cardfromfmri_stdres, cutoff=envcutoff, nyquist=slicesamplerate / 2.0, thresh=envthresh, ) if thispass == numpasses - 1: tide_io.writevec( normcardfromfmri_stdres, outputroot + "_normcardfromfmri_" + 
str(stdfreq) + "Hz.txt", ) tide_io.writevec( cardfromfmrienv_stdres, outputroot + "_cardfromfmrienv_" + str(stdfreq) + "Hz.txt", ) else: if saveintermediate: tide_io.writevec( normcardfromfmri_stdres, outputroot + "_normcardfromfmri_" + str(stdfreq) + "Hz_pass" + str(thispass + 1) + ".txt", ) tide_io.writevec( cardfromfmrienv_stdres, outputroot + "_cardfromfmrienv_" + str(stdfreq) + "Hz_pass" + str(thispass + 1) + ".txt", ) # calculate quality metrics calcplethquality( normcardfromfmri_stdres, stdfreq, infodict, "_bold", outputroot, outputlevel=outputlevel, ) thebadcardpts_stdres = findbadpts( cardfromfmri_stdres, "cardfromfmri_" + str(stdfreq) + "Hz", outputroot, stdfreq, infodict, ) timings.append( [ "Cardiac signal from image data resampled and saved" + passstring, time.time(), None, None, ] ) # apply the deep learning filter if we're going to do that if dodlfilter: if dlfilterexists: if mpfix: print("performing super dangerous openmp workaround") os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" modelpath = os.path.join( os.path.split(os.path.split(os.path.split(__file__)[0])[0])[0], "rapidtide", "data", "models", ) thedlfilter = tide_dlfilt.DeepLearningFilter(modelpath=modelpath) thedlfilter.loadmodel(modelname) infodict["dlfiltermodel"] = modelname normdlfilteredcard = thedlfilter.apply(normcardfromfmri_stdres) dlfilteredcard = thedlfilter.apply(cardfromfmri_stdres) if thispass == numpasses - 1: tide_io.writevec( normdlfilteredcard, outputroot + "_normcardfromfmri_dlfiltered_" + str(stdfreq) + "Hz.txt", ) tide_io.writevec( dlfilteredcard, outputroot + "_cardfromfmri_dlfiltered_" + str(stdfreq) + "Hz.txt", ) else: if saveintermediate: tide_io.writevec( normdlfilteredcard, outputroot + "_normcardfromfmri_dlfiltered_" + str(stdfreq) + "Hz_pass" + str(thispass + 1) + ".txt", ) tide_io.writevec( dlfilteredcard, outputroot + "_cardfromfmri_dlfiltered_" + str(stdfreq) + "Hz_pass" + str(thispass + 1) + ".txt", ) # calculate quality metrics calcplethquality( dlfilteredcard, stdfreq, infodict, "_dlfiltered", outputroot, outputlevel=outputlevel, ) # downsample to sliceres from stdres # cardfromfmri_sliceres = tide_math.madnormalize( # tide_resample.arbresample(dlfilteredcard, stdfreq, slicesamplerate, decimate=True, debug=False)) stdtimeaxis = (1.0 / stdfreq) * np.linspace( 0.0, len(dlfilteredcard), num=(len(dlfilteredcard)), endpoint=False ) arb_lowerstop = 0.0 arb_lowerpass = 0.0 arb_upperpass = slicesamplerate / 2.0 arb_upperstop = slicesamplerate / 2.0 theaafilter = tide_filt.NoncausalFilter(filtertype="arb") theaafilter.setfreqs(arb_lowerstop, arb_lowerpass, arb_upperpass, arb_upperstop) cardfromfmri_sliceres = tide_math.madnormalize( tide_resample.doresample( stdtimeaxis, theaafilter.apply(stdfreq, dlfilteredcard), slicetimeaxis, method="univariate", padlen=0, ) ) if thispass == numpasses - 1: tide_io.writevec( cardfromfmri_sliceres, outputroot + "_cardfromfmri_dlfiltered_sliceres.txt", ) infodict["used_dlreconstruction_filter"] = True peakfreq_dlfiltered = getcardcoeffs( cardfromfmri_sliceres, slicesamplerate, minhr=minhr, maxhr=maxhr, smoothlen=smoothlen, debug=debug, ) infodict["cardiacbpm_dlfiltered"] = np.round(peakfreq_dlfiltered * 60.0, 2) infodict["cardiacfreq_dlfiltered"] = peakfreq_dlfiltered # check the match between the raw and filtered cardiac signals maxval, maxdelay, failreason = checkcardmatch( raw_cardfromfmri_sliceres, cardfromfmri_sliceres, slicesamplerate, debug=debug, ) print( "Filtered cardiac fmri waveform delay is", maxdelay, "relative to raw fMRI data", ) 
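# A self-contained sketch of the delay estimate being reported here: two cardiac
# waveforms are cross-correlated and the lag of the correlation peak gives the
# shift, in seconds, of one regressor relative to the other. Toy signals only;
# checkcardmatch() above additionally band-filters, windows, and Gaussian-fits
# the correlation peak.
import numpy as np

fs = 25.0                                          # assumed sample rate, Hz
t = np.arange(0.0, 20.0, 1.0 / fs)
ref = np.sin(2.0 * np.pi * 1.1 * t)                # ~66 BPM reference waveform
delayed = np.sin(2.0 * np.pi * 1.1 * (t - 0.2))    # same waveform shifted by 0.2 s

xcorr = np.correlate(delayed - delayed.mean(), ref - ref.mean(), mode="full")
lags = (np.arange(len(xcorr)) - (len(ref) - 1)) / fs
print("estimated delay:", lags[np.argmax(xcorr)], "seconds")   # prints ~0.2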
print("Correlation coefficient between cardiac regressors:", maxval) infodict["corrcoeff_raw2filt"] = maxval + 0 infodict["delay_raw2filt"] = maxdelay + 0 infodict["failreason_raw2filt"] = failreason + 0 timings.append( [ "Deep learning filter applied" + passstring, time.time(), None, None, ] ) else: print("dlfilter could not be loaded - skipping") # get the cardiac signal from a file, if specified if cardiacfilename is not None: tide_util.logmem("before cardiacfromfile", file=memfile) pleth_sliceres, pleth_stdres = getphysiofile( cardiacfilename, colnum, colname, inputfreq, inputstart, slicetimeaxis, stdfreq, envcutoff, envthresh, timings, infodict, outputroot, outputlevel=outputlevel, debug=False, ) if dodlfilter and dlfilterexists: maxval, maxdelay, failreason = checkcardmatch( pleth_sliceres, cardfromfmri_sliceres, slicesamplerate, debug=debug ) print( "Input cardiac waveform delay is", maxdelay, "relative to filtered fMRI data", ) print("Correlation coefficient between cardiac regressors:", maxval) infodict["corrcoeff_filt2pleth"] = maxval + 0 infodict["delay_filt2pleth"] = maxdelay + 0 infodict["failreason_filt2pleth"] = failreason + 0 # check the match between the bold and physio cardiac signals maxval, maxdelay, failreason = checkcardmatch( pleth_sliceres, raw_cardfromfmri_sliceres, slicesamplerate, debug=debug ) print("Input cardiac waveform delay is", maxdelay, "relative to fMRI data") print("Correlation coefficient between cardiac regressors:", maxval) infodict["corrcoeff_raw2pleth"] = maxval + 0 infodict["delay_raw2pleth"] = maxdelay + 0 infodict["failreason_raw2pleth"] = failreason + 0 # align the pleth signal with the cardiac signal derived from the data if aligncardiac: alignpts_sliceres = -maxdelay / slicesamplerate # maxdelay is in seconds pleth_sliceres, dummy1, dummy2, dummy2 = tide_resample.timeshift( pleth_sliceres, alignpts_sliceres, int(10.0 * slicesamplerate) ) alignpts_stdres = -maxdelay * stdfreq # maxdelay is in seconds pleth_stdres, dummy1, dummy2, dummy3 = tide_resample.timeshift( pleth_stdres, alignpts_stdres, int(10.0 * stdfreq) ) if thispass == numpasses - 1: tide_io.writevec(pleth_sliceres, outputroot + "_pleth_sliceres.txt") tide_io.writevec(pleth_stdres, outputroot + "_pleth_" + str(stdfreq) + "Hz.txt") # now clean up cardiac signal filtpleth_stdres, normpleth_stdres, plethenv_stdres, envmean = cleancardiac( stdfreq, pleth_stdres, cutoff=envcutoff, thresh=envthresh ) if thispass == numpasses - 1: tide_io.writevec( normpleth_stdres, outputroot + "_normpleth_" + str(stdfreq) + "Hz.txt", ) tide_io.writevec( plethenv_stdres, outputroot + "_plethenv_" + str(stdfreq) + "Hz.txt" ) # calculate quality metrics calcplethquality( filtpleth_stdres, stdfreq, infodict, "_pleth", outputroot, outputlevel=outputlevel, ) if dodlfilter and dlfilterexists: dlfilteredpleth = thedlfilter.apply(pleth_stdres) if thispass == numpasses - 1: tide_io.writevec( dlfilteredpleth, outputroot + "_pleth_dlfiltered_" + str(stdfreq) + "Hz.txt", ) maxval, maxdelay, failreason = checkcardmatch( pleth_stdres, dlfilteredpleth, stdfreq, debug=debug ) print( "Filtered pleth cardiac waveform delay is", maxdelay, "relative to raw pleth data", ) print("Correlation coefficient between pleth regressors:", maxval) infodict["corrcoeff_pleth2filtpleth"] = maxval + 0 infodict["delay_pleth2filtpleth"] = maxdelay + 0 infodict["failreason_pleth2filtpleth"] = failreason + 0 # find bad points in plethysmogram thebadplethpts_sliceres = findbadpts( pleth_sliceres, "pleth_sliceres", outputroot, slicesamplerate, 
infodict, thetype="fracval", ) thebadplethpts_stdres = findbadpts( pleth_stdres, "pleth_" + str(stdfreq) + "Hz", outputroot, stdfreq, infodict, thetype="fracval", ) timings.append( [ "Cardiac signal from physiology data resampled to standard and saved" + passstring, time.time(), None, None, ] ) # find key components of cardiac waveform filtpleth = tide_math.madnormalize( thecardbandfilter.apply(slicesamplerate, pleth_sliceres) ) peakfreq_file = getcardcoeffs( (1.0 - thebadplethpts_sliceres) * filtpleth, slicesamplerate, minhr=minhr, maxhr=maxhr, smoothlen=smoothlen, debug=debug, ) timings.append( [ "Cardiac coefficients calculated from pleth waveform" + passstring, time.time(), None, None, ] ) infodict["cardiacbpm_pleth"] = np.round(peakfreq_file * 60.0, 2) infodict["cardiacfreq_pleth"] = peakfreq_file timings.append( [ "Cardiac signal from physiology data analyzed" + passstring, time.time(), None, None, ] ) timings.append( [ "Cardiac parameters extracted from physiology data" + passstring, time.time(), None, None, ] ) if not projectwithraw: cardiacwaveform = np.array(pleth_sliceres) badpointlist = 1.0 - (1.0 - thebadplethpts_sliceres) * (1.0 - badpointlist) infodict["pleth"] = True peakfreq = peakfreq_file else: infodict["pleth"] = False peakfreq = peakfreq_bold if outputlevel > 0: if thispass == numpasses - 1: tide_io.writevec(badpointlist, outputroot + "_overall_sliceres_badpts.txt") # extract the fundamental if forcedhr is not None: peakfreq = forcedhr infodict["forcedhr"] = peakfreq if cardiacfilename is None: filthiresfund = tide_math.madnormalize( getfundamental(cardiacwaveform * (1.0 - thebadcardpts), slicesamplerate, peakfreq) ) else: filthiresfund = tide_math.madnormalize( getfundamental(cardiacwaveform, slicesamplerate, peakfreq) ) if outputlevel > 1: if thispass == numpasses - 1: tide_io.writevec(filthiresfund, outputroot + "_cardiacfundamental_sliceres.txt") # now calculate the phase waveform tide_util.logmem("before analytic phase analysis", file=memfile) instantaneous_phase, amplitude_envelope = tide_fit.phaseanalysis(filthiresfund) if outputlevel > 0: if thispass == numpasses - 1: tide_io.writevec(amplitude_envelope, outputroot + "_ampenv_sliceres.txt") tide_io.writevec( instantaneous_phase, outputroot + "_instphase_unwrapped_sliceres.txt", ) if filtphase: print("filtering phase waveform") instantaneous_phase = tide_math.trendfilt(instantaneous_phase, debug=False) if outputlevel > 1: if thispass == numpasses - 1: tide_io.writevec( instantaneous_phase, outputroot + "_filtered_instphase_unwrapped.txt", ) initialphase = instantaneous_phase[0] infodict["phi0"] = initialphase timings.append(["Phase waveform generated" + passstring, time.time(), None, None]) # account for slice time offests offsets_byslice = np.zeros((xsize * ysize, numslices), dtype=np.float64) for i in range(numslices): offsets_byslice[:, i] = slicetimes[i] # remap offsets to space by time fmri_offsets = offsets_byslice.reshape(numspatiallocs) # save the information file if saveinfoasjson: tide_io.writedicttojson(infodict, outputroot + "_info.json") else: tide_io.writedict(infodict, outputroot + "_info.txt") # interpolate the instantaneous phase upsampledslicetimeaxis = np.linspace( 0.0, tr * timepoints, num=(timepoints * numsteps * upsamplefac), endpoint=False, ) interpphase = tide_math.phasemod( tide_resample.doresample( slicetimeaxis, instantaneous_phase, upsampledslicetimeaxis, method="univariate", padlen=0, ), centric=centric, ) if outputlevel > 1: if thispass == numpasses - 1: 
tide_io.writevec(interpphase, outputroot + "_interpinstphase.txt") if cardcalconly: print("cardiac waveform calculations done - exiting") # Process and save timing information nodeline = "Processed on " + platform.node() tide_util.proctiminginfo( timings, outputfile=outputroot + "_runtimings.txt", extraheader=nodeline ) tide_util.logmem("final", file=memfile) sys.exit() # find the phase values for all timepoints in all slices phasevals = np.zeros((numslices, timepoints), dtype=np.float64) thetimes = [] for theslice in range(numslices): thetimes.append( np.linspace(0.0, tr * timepoints, num=timepoints, endpoint=False) + slicetimes[theslice] ) phasevals[theslice, :] = tide_math.phasemod( tide_resample.doresample( slicetimeaxis, instantaneous_phase, thetimes[-1], method="univariate", padlen=0, ), centric=centric, ) if debug: if thispass == numpasses - 1: tide_io.writevec( thetimes[-1], outputroot + "_times_" + str(theslice).zfill(2) + ".txt", ) tide_io.writevec( phasevals[theslice, :], outputroot + "_phasevals_" + str(theslice).zfill(2) + ".txt", ) timings.append( [ "Slice phases determined for all timepoints" + passstring, time.time(), None, None, ] ) # construct the destination arrays tide_util.logmem("before making destination arrays", file=memfile) app = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64) app_byslice = app.reshape((xsize * ysize, numslices, destpoints)) cine = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64) cine_byslice = cine.reshape((xsize * ysize, numslices, destpoints)) rawapp = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64) rawapp_byslice = rawapp.reshape((xsize * ysize, numslices, destpoints)) corrected_rawapp = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64) corrected_rawapp_byslice = rawapp.reshape((xsize * ysize, numslices, destpoints)) normapp = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64) normapp_byslice = normapp.reshape((xsize * ysize, numslices, destpoints)) weights = np.zeros((xsize, ysize, numslices, destpoints), dtype=np.float64) weight_byslice = weights.reshape((xsize * ysize, numslices, destpoints)) derivatives = np.zeros((xsize, ysize, numslices, 4), dtype=np.float64) derivatives_byslice = derivatives.reshape((xsize * ysize, numslices, 4)) timings.append(["Output arrays allocated" + passstring, time.time(), None, None]) if centric: outphases = np.linspace(-np.pi, np.pi, num=destpoints, endpoint=False) else: outphases = np.linspace(0.0, 2.0 * np.pi, num=destpoints, endpoint=False) phasestep = outphases[1] - outphases[0] ####################################################################################################### # # now do the phase projection # # demeandata_byslice = demeandata.reshape((xsize * ysize, numslices, timepoints)) means_byslice = means.reshape((xsize * ysize, numslices)) timings.append(["Phase projection to image started" + passstring, time.time(), None, None]) print("starting phase projection") proctrs = range(timepoints) # proctrs is the list of all fmri trs to be projected procpoints = range( timepoints * numsteps ) # procpoints is the list of all sliceres datapoints to be projected if censorbadpts: censortrs = np.zeros(timepoints, dtype="int") censorpoints = np.zeros(timepoints * numsteps, dtype="int") censortrs[np.where(badpointlist > 0.0)[0] // numsteps] = 1 censorpoints[np.where(badpointlist > 0.0)[0]] = 1 proctrs = np.where(censortrs < 1)[0] procpoints = np.where(censorpoints < 1)[0] # do phase averaging app_bypoint = 
cardiaccycleaverage( instantaneous_phase, outphases, cardfromfmri_sliceres, procpoints, congridbins, gridkernel, centric, cyclic=True, ) if thispass == numpasses - 1: tide_io.writevec(app_bypoint, outputroot + "_cardcyclefromfmri.txt") # now do time averaging lookaheadval = int(slicesamplerate / 4.0) print("lookaheadval = ", lookaheadval) wrappedphase = tide_math.phasemod(instantaneous_phase, centric=centric) max_peaks, min_peaks = tide_fit.peakdetect(wrappedphase, lookahead=lookaheadval) # start on a maximum if max_peaks[0][0] > min_peaks[0][0]: min_peaks = min_peaks[1:] # work only with pairs if len(max_peaks) > len(min_peaks): max_peaks = max_peaks[:-1] # max_peaks, min_peaks = findphasecuts(tide_math.phasemod(instantaneous_phase, centric=centric)) zerophaselocs = [] for idx, peak in enumerate(max_peaks): minloc = min_peaks[idx][0] maxloc = max_peaks[idx][0] minval = min_peaks[idx][1] maxval = max_peaks[idx][1] if minloc > 0: if wrappedphase[minloc - 1] < wrappedphase[minloc]: minloc -= 1 minval = wrappedphase[minloc] phasediff = minval - (maxval - 2.0 * np.pi) timediff = minloc - maxloc zerophaselocs.append(1.0 * minloc - (minval - outphases[0]) * timediff / phasediff) # print(idx, [maxloc, maxval], [minloc, minval], phasediff, timediff, zerophaselocs[-1]) instantaneous_time = instantaneous_phase * 0.0 whichpeak = 0 for t in procpoints: if whichpeak < len(zerophaselocs) - 1: if t > zerophaselocs[whichpeak + 1]: whichpeak += 1 if t > zerophaselocs[whichpeak]: instantaneous_time[t] = (t - zerophaselocs[whichpeak]) / slicesamplerate # print(t, whichpeak, zerophaselocs[whichpeak], instantaneous_time[t]) maxtime = ( np.ceil( int(1.02 * tide_stats.getfracval(instantaneous_time, 0.98) // pulsereconstepsize) ) * pulsereconstepsize ) outtimes = np.linspace(0.0, maxtime, num=int(maxtime / pulsereconstepsize), endpoint=False) atp_bypoint = cardiaccycleaverage( instantaneous_time, outtimes, cardfromfmri_sliceres, procpoints, congridbins, gridkernel, False, cyclic=True, ) if thispass == numpasses - 1: tide_io.writevec(atp_bypoint, outputroot + "_cardpulsefromfmri.txt") else: if saveintermediate: tide_io.writevec( atp_bypoint, outputroot + "_cardpulsefromfmri_pass" + str(thispass + 1) + ".txt", ) if not verbose: print("phase projecting...") # make a lowpass filter for the projected data. 
Limit frequency to 3 cycles per 2pi (1/6th Fs) phaseFs = 1.0 / phasestep phaseFc = phaseFs / 6.0 appsmoothingfilter = tide_filt.NoncausalFilter("arb", cyclic=True, padtime=0.0) appsmoothingfilter.setfreqs(0.0, 0.0, phaseFc, phaseFc) # setup for aliased correlation if we're going to do it if doaliasedcorrelation and (thispass == numpasses - 1): if cardiacfilename: signal_stdres = pleth_stdres else: signal_stdres = dlfilteredcard corrsearchvals = ( np.linspace(0.0, aliasedcorrelationwidth, num=aliasedcorrelationpts) - aliasedcorrelationwidth / 2.0 ) theAliasedCorrelator = tide_corr.AliasedCorrelator( signal_stdres, stdfreq, mrsamplerate, corrsearchvals, padtime=aliasedcorrelationwidth, ) thecorrfunc = np.zeros( (xsize, ysize, numslices, aliasedcorrelationpts), dtype=np.float64 ) thecorrfunc_byslice = thecorrfunc.reshape( (xsize * ysize, numslices, aliasedcorrelationpts) ) wavedelay = np.zeros((xsize, ysize, numslices), dtype=np.float) wavedelay_byslice = wavedelay.reshape((xsize * ysize, numslices)) waveamp = np.zeros((xsize, ysize, numslices), dtype=np.float) waveamp_byslice = waveamp.reshape((xsize * ysize, numslices)) # now project the data fmri_data_byslice = input_data.byslice() for theslice in range(numslices): if showprogressbar: tide_util.progressbar(theslice + 1, numslices, label="Percent complete") if verbose: print("phase projecting for slice", theslice) validlocs = np.where(projmask_byslice[:, theslice] > 0)[0] # indexlist = range(0, len(phasevals[theslice, :])) if len(validlocs) > 0: for t in proctrs: filteredmr = -demeandata_byslice[validlocs, theslice, t] cinemr = fmri_data_byslice[validlocs, theslice, t] thevals, theweights, theindices = tide_resample.congrid( outphases, phasevals[theslice, t], 1.0, congridbins, kernel=gridkernel, cyclic=True, ) for i in range(len(theindices)): weight_byslice[validlocs, theslice, theindices[i]] += theweights[i] rawapp_byslice[validlocs, theslice, theindices[i]] += ( theweights[i] * filteredmr ) cine_byslice[validlocs, theslice, theindices[i]] += theweights[i] * cinemr for d in range(destpoints): if weight_byslice[validlocs[0], theslice, d] == 0.0: weight_byslice[validlocs, theslice, d] = 1.0 rawapp_byslice[validlocs, theslice, :] = np.nan_to_num( rawapp_byslice[validlocs, theslice, :] / weight_byslice[validlocs, theslice, :] ) cine_byslice[validlocs, theslice, :] = np.nan_to_num( cine_byslice[validlocs, theslice, :] / weight_byslice[validlocs, theslice, :] ) else: rawapp_byslice[:, theslice, :] = 0.0 cine_byslice[:, theslice, :] = 0.0 # smooth the projected data along the time dimension if smoothapp: for loc in validlocs: rawapp_byslice[loc, theslice, :] = appsmoothingfilter.apply( phaseFs, rawapp_byslice[loc, theslice, :] ) derivatives_byslice[loc, theslice, :] = circularderivs( rawapp_byslice[loc, theslice, :] ) appflips_byslice = np.where( -derivatives_byslice[:, :, 2] > derivatives_byslice[:, :, 0], -1.0, 1.0 ) timecoursemean = np.mean(rawapp_byslice[validlocs, theslice, :], axis=1).reshape( (-1, 1) ) if fliparteries: corrected_rawapp_byslice[validlocs, theslice, :] = ( rawapp_byslice[validlocs, theslice, :] - timecoursemean ) * appflips_byslice[validlocs, theslice, None] + timecoursemean if doaliasedcorrelation and (thispass == numpasses - 1): for theloc in validlocs: thecorrfunc_byslice[theloc, theslice, :] = theAliasedCorrelator.apply( -appflips_byslice[theloc, theslice] * demeandata_byslice[theloc, theslice, :], -thetimes[theslice][0], ) maxloc = np.argmax(thecorrfunc_byslice[theloc, theslice, :]) wavedelay_byslice[theloc, theslice] = 
corrsearchvals[maxloc] waveamp_byslice[theloc, theslice] = thecorrfunc_byslice[ theloc, theslice, maxloc ] else: corrected_rawapp_byslice[validlocs, theslice, :] = rawapp_byslice[ validlocs, theslice, : ] if doaliasedcorrelation and (thispass == numpasses - 1): for theloc in validlocs: thecorrfunc_byslice[theloc, theslice, :] = theAliasedCorrelator.apply( -demeandata_byslice[theloc, theslice, :], -thetimes[theslice][0], ) maxloc = np.argmax(np.abs(thecorrfunc_byslice[theloc, theslice, :])) wavedelay_byslice[theloc, theslice] = corrsearchvals[maxloc] waveamp_byslice[theloc, theslice] = thecorrfunc_byslice[ theloc, theslice, maxloc ] timecoursemin = np.min( corrected_rawapp_byslice[validlocs, theslice, :], axis=1 ).reshape((-1, 1)) app_byslice[validlocs, theslice, :] = ( corrected_rawapp_byslice[validlocs, theslice, :] - timecoursemin ) normapp_byslice[validlocs, theslice, :] = np.nan_to_num( app_byslice[validlocs, theslice, :] / means_byslice[validlocs, theslice, None] ) if not verbose: print(" done") timings.append( [ "Phase projection to image completed" + passstring, time.time(), None, None, ] ) print("phase projection done") # save the analytic phase projection image theheader = copy.deepcopy(nim_hdr) theheader["dim"][4] = destpoints theheader["toffset"] = -np.pi theheader["pixdim"][4] = 2.0 * np.pi / destpoints if thispass == numpasses - 1: tide_io.savetonifti(app, theheader, outputroot + "_app") tide_io.savetonifti(normapp, theheader, outputroot + "_normapp") tide_io.savetonifti(cine, theheader, outputroot + "_cine") if outputlevel > 0: tide_io.savetonifti(rawapp, theheader, outputroot + "_rawapp") timings.append(["Phase projected data saved" + passstring, time.time(), None, None]) if doaliasedcorrelation and thispass == numpasses - 1: theheader = copy.deepcopy(nim_hdr) theheader["dim"][4] = aliasedcorrelationpts theheader["toffset"] = 0.0 theheader["pixdim"][4] = corrsearchvals[1] - corrsearchvals[0] tide_io.savetonifti(thecorrfunc, theheader, outputroot + "_corrfunc") theheader["dim"][4] = 1 tide_io.savetonifti(wavedelay, theheader, outputroot + "_wavedelay") tide_io.savetonifti(waveamp, theheader, outputroot + "_waveamp") # make and save a voxel intensity histogram if unnormvesselmap: app2d = app.reshape((numspatiallocs, destpoints)) else: app2d = normapp.reshape((numspatiallocs, destpoints)) validlocs = np.where(mask > 0)[0] histinput = app2d[validlocs, :].reshape((len(validlocs), destpoints)) if outputlevel > 0: tide_stats.makeandsavehistogram(histinput, histlen, 0, outputroot + "_histogram") # find vessel threshholds tide_util.logmem("before making vessel masks", file=memfile) hardvesselthresh = tide_stats.getfracvals(np.max(histinput, axis=1), [0.98])[0] / 2.0 softvesselthresh = softvesselfrac * hardvesselthresh print("hard, soft vessel threshholds set to", hardvesselthresh, softvesselthresh) # save a vessel masked version of app if unnormvesselmap: vesselmask = np.where(np.max(app, axis=3) > softvesselthresh, 1, 0) else: vesselmask = np.where(np.max(normapp, axis=3) > softvesselthresh, 1, 0) maskedapp2d = np.array(app2d) maskedapp2d[np.where(vesselmask.reshape(numspatiallocs) == 0)[0], :] = 0.0 if outputlevel > 1: if thispass == numpasses - 1: tide_io.savetonifti( maskedapp2d.reshape((xsize, ysize, numslices, destpoints)), theheader, outputroot + "_maskedapp", ) del maskedapp2d timings.append( [ "Vessel masked phase projected data saved" + passstring, time.time(), None, None, ] ) # save multiple versions of the hard vessel mask if unnormvesselmap: vesselmask = 
np.where(np.max(app, axis=3) > hardvesselthresh, 1, 0) minphase = np.argmin(app, axis=3) * 2.0 * np.pi / destpoints - np.pi maxphase = np.argmax(app, axis=3) * 2.0 * np.pi / destpoints - np.pi else: vesselmask = np.where(np.max(normapp, axis=3) > hardvesselthresh, 1, 0) minphase = np.argmin(normapp, axis=3) * 2.0 * np.pi / destpoints - np.pi maxphase = np.argmax(normapp, axis=3) * 2.0 * np.pi / destpoints - np.pi risediff = (maxphase - minphase) * vesselmask arteries = np.where(appflips_byslice.reshape((xsize, ysize, numslices)) < 0, vesselmask, 0) veins = np.where(appflips_byslice.reshape((xsize, ysize, numslices)) > 0, vesselmask, 0) theheader = copy.deepcopy(nim_hdr) theheader["dim"][4] = 1 if thispass == numpasses - 1: tide_io.savetonifti(vesselmask, theheader, outputroot + "_vesselmask") if outputlevel > 0: tide_io.savetonifti(minphase, theheader, outputroot + "_minphase") tide_io.savetonifti(maxphase, theheader, outputroot + "_maxphase") tide_io.savetonifti(arteries, theheader, outputroot + "_arteries") tide_io.savetonifti(veins, theheader, outputroot + "_veins") timings.append(["Masks saved" + passstring, time.time(), None, None]) # now get ready to start again with a new mask estmask_byslice = vesselmask.reshape((xsize * ysize, numslices)) + 0 # save a vessel image if unnormvesselmap: vesselmap = np.max(app, axis=3) else: vesselmap = np.max(normapp, axis=3) tide_io.savetonifti(vesselmap, theheader, outputroot + "_vesselmap") tide_io.savetonifti( np.where(appflips_byslice.reshape((xsize, ysize, numslices)) < 0, vesselmap, 0.0), theheader, outputroot + "_arterymap", ) tide_io.savetonifti( np.where(appflips_byslice.reshape((xsize, ysize, numslices)) > 0, vesselmap, 0.0), theheader, outputroot + "_veinmap", ) # now generate aliased cardiac signals and regress them out of the data if dotemporalglm or dospatialglm: # generate the signals timings.append(["Cardiac signal regression started", time.time(), None, None]) tide_util.logmem("before cardiac regression", file=memfile) print("generating cardiac regressors") cardiacnoise = fmri_data * 0.0 cardiacnoise_byslice = cardiacnoise.reshape((xsize * ysize, numslices, timepoints)) phaseindices = (cardiacnoise * 0.0).astype(np.int16) phaseindices_byslice = phaseindices.reshape((xsize * ysize, numslices, timepoints)) for theslice in range(numslices): print("calculating cardiac noise for slice", theslice) validlocs = np.where(projmask_byslice[:, theslice] > 0)[0] for t in range(timepoints): phaseindices_byslice[validlocs, theslice, t] = tide_util.valtoindex( outphases, phasevals[theslice, t] ) cardiacnoise_byslice[validlocs, theslice, t] = rawapp_byslice[ validlocs, theslice, phaseindices_byslice[validlocs, theslice, t] ] theheader = copy.deepcopy(nim_hdr) timings.append(["Cardiac signal generated", time.time(), None, None]) if savecardiacnoise: tide_io.savetonifti( cardiacnoise.reshape((xsize, ysize, numslices, timepoints)), theheader, outputroot + "_cardiacnoise", ) tide_io.savetonifti( phaseindices.reshape((xsize, ysize, numslices, timepoints)), theheader, outputroot + "_phaseindices", ) timings.append(["Cardiac signal saved", time.time(), None, None]) # now remove them tide_util.logmem("before cardiac removal", file=memfile) print("removing cardiac signal with GLM") filtereddata = 0.0 * fmri_data validlocs = np.where(mask > 0)[0] numvalidspatiallocs = len(validlocs) threshval = 0.0 if dospatialglm: meanvals = np.zeros(timepoints, dtype=np.float64) rvals = np.zeros(timepoints, dtype=np.float64) r2vals = np.zeros(timepoints, dtype=np.float64) 
fitcoffs = np.zeros(timepoints, dtype=np.float64) fitNorm = np.zeros(timepoints, dtype=np.float64) datatoremove = 0.0 * fmri_data print("running spatial glm on", timepoints, "timepoints") tide_glmpass.glmpass( timepoints, fmri_data[validlocs, :], threshval, cardiacnoise[validlocs, :], meanvals, rvals, r2vals, fitcoffs, fitNorm, datatoremove[validlocs, :], filtereddata[validlocs, :], reportstep=(timepoints // 100), mp_chunksize=10, procbyvoxel=False, nprocs=nprocs, ) print(datatoremove.shape, cardiacnoise.shape, fitcoffs.shape) datatoremove[validlocs, :] = np.multiply(cardiacnoise[validlocs, :], fitcoffs[:, None]) filtereddata = fmri_data - datatoremove timings.append( [ "Cardiac signal spatial regression finished", time.time(), timepoints, "timepoints", ] ) tide_io.writevec(fitcoffs, outputroot + "_fitcoff.txt") tide_io.writevec(meanvals, outputroot + "_fitmean.txt") tide_io.writevec(rvals, outputroot + "_fitR.txt") theheader = copy.deepcopy(nim_hdr) tide_io.savetonifti( filtereddata.reshape((xsize, ysize, numslices, timepoints)), theheader, outputroot + "_temporalfiltereddata", ) tide_io.savetonifti( datatoremove.reshape((xsize, ysize, numslices, timepoints)), theheader, outputroot + "_temporaldatatoremove", ) timings.append( [ "Cardiac signal spatial regression files written", time.time(), None, None, ] ) if dotemporalglm: meanvals = np.zeros(numspatiallocs, dtype=np.float64) rvals = np.zeros(numspatiallocs, dtype=np.float64) r2vals = np.zeros(numspatiallocs, dtype=np.float64) fitcoffs = np.zeros(numspatiallocs, dtype=np.float64) fitNorm = np.zeros(numspatiallocs, dtype=np.float64) datatoremove = 0.0 * fmri_data print("running temporal glm on", numvalidspatiallocs, "voxels") tide_glmpass.glmpass( numvalidspatiallocs, fmri_data[validlocs, :], threshval, cardiacnoise[validlocs, :], meanvals[validlocs], rvals[validlocs], r2vals[validlocs], fitcoffs[validlocs], fitNorm[validlocs], datatoremove[validlocs, :], filtereddata[validlocs, :], procbyvoxel=True, nprocs=nprocs, ) datatoremove[validlocs, :] = np.multiply(cardiacnoise[validlocs, :], fitcoffs[:, None]) filtereddata[validlocs, :] = fmri_data[validlocs, :] - datatoremove timings.append( [ "Cardiac signal temporal regression finished", time.time(), numspatiallocs, "voxels", ] ) theheader = copy.deepcopy(nim_hdr) theheader["dim"][4] = 1 tide_io.savetonifti( fitcoffs.reshape((xsize, ysize, numslices)), theheader, outputroot + "_fitamp", ) tide_io.savetonifti( meanvals.reshape((xsize, ysize, numslices)), theheader, outputroot + "_fitamp", ) tide_io.savetonifti( rvals.reshape((xsize, ysize, numslices)), theheader, outputroot + "_fitR", ) theheader = copy.deepcopy(nim_hdr) tide_io.savetonifti( filtereddata.reshape((xsize, ysize, numslices, timepoints)), theheader, outputroot + "_temporalfiltereddata", ) tide_io.savetonifti( datatoremove.reshape((xsize, ysize, numslices, timepoints)), theheader, outputroot + "_temporaldatatoremove", ) timings.append( [ "Cardiac signal temporal regression files written", time.time(), None, None, ] ) timings.append(["Done", time.time(), None, None]) # Process and save timing information nodeline = "Processed on " + platform.node() tide_util.proctiminginfo( timings, outputfile=outputroot + "_runtimings.txt", extraheader=nodeline ) tide_util.logmem("final", file=memfile) if __name__ == "__main__": # grab the command line arguments then pass them off. nargs = len(sys.argv) if nargs < 4: usage() exit() happy_main(sys.argv)
bbfrederick/rapidtide
rapidtide/workflows/happy_legacy.py
Python
apache-2.0
113,373
[ "Gaussian" ]
144d43ce148e4b30e66e115fc8e54c4905b3676a31679acf059e76918793ff12
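The temporal GLM step in the happy_legacy workflow above fits the projected cardiac waveform to every voxel and then subtracts the fitted component (datatoremove = cardiacnoise * fitcoffs; filtereddata = fmri_data - datatoremove). The sketch below shows that per-voxel idea in plain numpy; it is not rapidtide's tide_glmpass.glmpass, and the function name, shapes and synthetic data are assumptions for illustration only.

import numpy as np

def remove_regressor(data, regressor):
    # Hedged sketch (not from rapidtide). data, regressor: (nvoxels, ntimepoints).
    # Fit data[v, :] ~ fit[v] * regressor[v, :] per voxel by least squares on the
    # demeaned series and subtract the fitted component, keeping the voxel mean.
    reg0 = regressor - regressor.mean(axis=1, keepdims=True)
    dat0 = data - data.mean(axis=1, keepdims=True)
    denom = np.sum(reg0 * reg0, axis=1)
    denom[denom == 0.0] = 1.0                 # guard voxels with a flat regressor
    fit = np.sum(reg0 * dat0, axis=1) / denom
    return data - fit[:, None] * reg0, fit

# Synthetic demonstration: recover the injected "cardiac" amplitude per voxel.
rng = np.random.default_rng(0)
nvox, ntp = 10, 200
waveform = np.sin(np.linspace(0.0, 20.0 * np.pi, ntp))
amps = rng.normal(size=(nvox, 1))
data = rng.normal(size=(nvox, ntp)) + amps * waveform
filtered, fitamps = remove_regressor(data, np.tile(waveform, (nvox, 1)))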
'''
The Las Vegas board game consists of:
* Six casinos, one for each dice value
* Eight six-sided dice per player
* Cash cards:
    $60,000 (5)
    $70,000 (5)
    $80,000 (5)
    $90,000 (5)
    $10,000 (6)
    $40,000 (6)
    $50,000 (6)
    $20,000 (8)
    $30,000 (8)
* Between 2 and 5 players

Setup:
For each casino, draw money cards until it is >= $50,000
Assign the starting player card to someone (randomly?)

For each player:
    If they still have dice:
        Roll all their dice
        Choose a dice value to play
        Put all of those dice on the matching casino

End of round:
    For each card on the casino in descending order of value:
        Award to player with largest number of dice, if no other player has the same number of dice

Shuffle the cash cards
Repeat setup, move starting card to next player

Winner is the player with the most money after 4 rounds
'''
import random

DICE_SIDES = 6


class Dice(object):
    def __init__(self, owner):
        self.owner = owner
        self.value = None

    def __repr__(self):
        return '{}: {}'.format(self.owner, self.value)

    def roll(self):
        self.value = random.randint(1, DICE_SIDES)


class Casino(object):
    def __init__(self):
        self.cards = []
        self.dice = []

    def total_value(self):
        return sum(self.cards)


class Round(object):
    def __init__(self):
        self.casinos = {value: Casino() for value in range(1, DICE_SIDES+1)}

    def deal_cards(self, cards):
        for value, casino in self.casinos.iteritems():
            casino.dice = []
            while casino.total_value() < 5:
                cardindex = random.randint(0, len(cards)-1)
                casino.cards.append(cards.pop(cardindex))

    def play_turns(self, players):
        for player in players:
            player.init_dice()
        while any([player.has_dice_left() for player in players]):
            for player in players:
                if not player.has_dice_left():
                    continue
                print '\n{}\'s turn'.format(player.name)
                played_dice = player.get_turn(self.casinos)
                self.casinos[played_dice[0].value].dice.append(played_dice)


class InteractivePlayer(object):
    def __init__(self, name):
        self.name = name
        self.init_dice()

    def init_dice(self):
        self.dice = [Dice(self.name) for _ in range(8)]

    def has_dice_left(self):
        if len(self.dice) > 0:
            return True
        else:
            return False

    def get_turn(self, casinos):
        '''
        Takes a dict of casinos, {num: casino}
        Returns which of its dice it is going to play
        '''
        for dice in self.dice:
            dice.roll()
        print 'dice: {}'.format([d.value for d in self.dice])
        print 'casinos:'
        for value, casino in casinos.iteritems():
            print '\t{}'.format(value)
            for dice in casino.dice:
                print '\t\t{}'.format(dice)
        desired_dice = int(raw_input('{}, play which dice: '.format(self.name)))
        played_dice = [d for d in self.dice if d.value == desired_dice]
        # TODO: error check
        for dice_to_remove in played_dice:
            self.dice.remove(dice_to_remove)
        return played_dice


class Game(object):
    def __init__(self, players):
        self.players = players
        self.cards = (
            [6] * 5 +
            [7] * 5 +
            [8] * 5 +
            [9] * 5 +
            [1] * 6 +
            [4] * 6 +
            [5] * 6 +
            [2] * 8 +
            [3] * 8
        )
        self.rounds = [Round() for _ in range(4)]

    def play(self):
        for round_num, cur_round in enumerate(self.rounds):
            print '\nround {}'.format(round_num+1)
            cur_round.deal_cards(self.cards)
            cur_round.play_turns(self.players)


if __name__ == '__main__':
    player_one = InteractivePlayer('alice')
    player_two = InteractivePlayer('boab')
    game = Game([player_one, player_two])
    game.play()
olimoth/vegas
game.py
Python
gpl-3.0
4,142
[ "CASINO" ]
901468308c3b394eba36dfbe3fb14464dda4f62af8c3680190b5b7aa22149e70
# coding: utf-8 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Interfaces to perform image registrations and to apply the resulting displacement maps to images and points. """ from __future__ import absolute_import import os.path as op import re from ..base import (CommandLine, CommandLineInputSpec, isdefined, TraitedSpec, File, traits, InputMultiPath) from .base import ElastixBaseInputSpec from ... import logging logger = logging.getLogger('interface') class RegistrationInputSpec(ElastixBaseInputSpec): fixed_image = File(exists=True, mandatory=True, argstr='-f %s', desc='fixed image') moving_image = File(exists=True, mandatory=True, argstr='-m %s', desc='moving image') parameters = InputMultiPath(File(exists=True), mandatory=True, argstr='-p %s...', desc='parameter file, elastix handles 1 or more -p') fixed_mask = File(exists=True, argstr='-fMask %s', desc='mask for fixed image') moving_mask = File(exists=True, argstr='-mMask %s', desc='mask for moving image') initial_transform = File(exists=True, argstr='-t0 %s', desc='parameter file for initial transform') class RegistrationOutputSpec(TraitedSpec): transform = InputMultiPath(File(exists=True), desc='output transform') warped_file = File(desc='input moving image warped to fixed image') warped_files = InputMultiPath(File(exists=False), desc=('input moving image warped to fixed image at each level')) warped_files_flags = traits.List(traits.Bool(False), desc='flag indicating if warped image was generated') class Registration(CommandLine): """ Elastix nonlinear registration interface Example ------- >>> from nipype.interfaces.elastix import Registration >>> reg = Registration() >>> reg.inputs.fixed_image = 'fixed1.nii' # doctest: +SKIP >>> reg.inputs.moving_image = 'moving1.nii' # doctest: +SKIP >>> reg.inputs.parameters = ['elastix.txt'] # doctest: +SKIP >>> reg.cmdline # doctest: +SKIP 'elastix -f fixed1.nii -m moving1.nii -out ./ -p elastix.txt' """ _cmd = 'elastix' input_spec = RegistrationInputSpec output_spec = RegistrationOutputSpec def _list_outputs(self): outputs = self._outputs().get() out_dir = op.abspath(self.inputs.output_path) opts = ['WriteResultImage', 'ResultImageFormat'] regex = re.compile(r'^\((\w+)\s(.+)\)$') outputs['transform'] = [] outputs['warped_files'] = [] outputs['warped_files_flags'] = [] for i, params in enumerate(self.inputs.parameters): config = {} with open(params, 'r') as f: for line in f.readlines(): line = line.strip() if not line.startswith('//') and line: m = regex.search(line) if m: value = self._cast(m.group(2).strip()) config[m.group(1).strip()] = value outputs['transform'].append(op.join(out_dir, 'TransformParameters.%01d.txt' % i)) warped_file = None if config['WriteResultImage']: warped_file = op.join(out_dir, 'result.%01d.%s' % (i, config['ResultImageFormat'])) outputs['warped_files'].append(warped_file) outputs['warped_files_flags'].append(config['WriteResultImage']) if outputs['warped_files_flags'][-1]: outputs['warped_file'] = outputs['warped_files'][-1] return outputs def _cast(self, val): if val.startswith('"') and val.endswith('"'): if val == '"true"': return True elif val == '"false"': return False else: return val[1:-1] try: return int(val) except ValueError: try: return float(val) except ValueError: return val class ApplyWarpInputSpec(ElastixBaseInputSpec): transform_file = File(exists=True, mandatory=True, argstr='-tp %s', desc='transform-parameter file, only 1') moving_image = File(exists=True, argstr='-in %s', 
mandatory=True, desc='input image to deform') class ApplyWarpOutputSpec(TraitedSpec): warped_file = File(desc='input moving image warped to fixed image') class ApplyWarp(CommandLine): """ Use ``transformix`` to apply a transform on an input image. The transform is specified in the transform-parameter file. Example ------- >>> from nipype.interfaces.elastix import ApplyWarp >>> reg = ApplyWarp() >>> reg.inputs.moving_image = 'moving1.nii' # doctest: +SKIP >>> reg.inputs.transform_file = 'TransformParameters.0.txt' # doctest: +SKIP >>> reg.cmdline # doctest: +SKIP 'transformix -in moving1.nii -out ./ -tp TransformParameters.0.txt' """ _cmd = 'transformix' input_spec = ApplyWarpInputSpec output_spec = ApplyWarpOutputSpec def _list_outputs(self): outputs = self._outputs().get() out_dir = op.abspath(self.inputs.output_path) outputs['warped_file'] = op.join(out_dir, 'result.nii.gz') return outputs class AnalyzeWarpInputSpec(ElastixBaseInputSpec): transform_file = File(exists=True, mandatory=True, argstr='-tp %s', desc='transform-parameter file, only 1') class AnalyzeWarpOutputSpec(TraitedSpec): disp_field = File(desc='displacements field') jacdet_map = File(desc='det(Jacobian) map') jacmat_map = File(desc='Jacobian matrix map') class AnalyzeWarp(CommandLine): """ Use transformix to get details from the input transform (generate the corresponding deformation field, generate the determinant of the Jacobian map or the Jacobian map itself) Example ------- >>> from nipype.interfaces.elastix import AnalyzeWarp >>> reg = AnalyzeWarp() >>> reg.inputs.transform_file = 'TransformParameters.0.txt' # doctest: +SKIP >>> reg.cmdline # doctest: +SKIP 'transformix -def all -jac all -jacmat all -out ./ -tp TransformParameters.0.txt' """ _cmd = 'transformix -def all -jac all -jacmat all' input_spec = AnalyzeWarpInputSpec output_spec = AnalyzeWarpOutputSpec def _list_outputs(self): outputs = self._outputs().get() out_dir = op.abspath(self.inputs.output_path) outputs['disp_field'] = op.join(out_dir, 'deformationField.nii.gz') outputs['jacdet_map'] = op.join(out_dir, 'spatialJacobian.nii.gz') outputs['jacmat_map'] = op.join(out_dir, 'fullSpatialJacobian.nii.gz') return outputs class PointsWarpInputSpec(ElastixBaseInputSpec): points_file = File(exists=True, argstr='-def %s', mandatory=True, desc='input points (accepts .vtk triangular meshes).') transform_file = File(exists=True, mandatory=True, argstr='-tp %s', desc='transform-parameter file, only 1') class PointsWarpOutputSpec(TraitedSpec): warped_file = File(desc='input points displaced in fixed image domain') class PointsWarp(CommandLine): """Use ``transformix`` to apply a transform on an input point set. The transform is specified in the transform-parameter file. Example ------- >>> from nipype.interfaces.elastix import PointsWarp >>> reg = PointsWarp() >>> reg.inputs.points_file = 'surf1.vtk' # doctest: +SKIP >>> reg.inputs.transform_file = 'TransformParameters.0.txt' # doctest: +SKIP >>> reg.cmdline # doctest: +SKIP 'transformix -out ./ -def surf1.vtk -tp TransformParameters.0.txt' """ _cmd = 'transformix' input_spec = PointsWarpInputSpec output_spec = PointsWarpOutputSpec def _list_outputs(self): outputs = self._outputs().get() out_dir = op.abspath(self.inputs.output_path) fname, ext = op.splitext(op.basename(self.inputs.points_file)) outputs['warped_file'] = op.join(out_dir, 'outputpoints%s' % ext) return outputs
iglpdc/nipype
nipype/interfaces/elastix/registration.py
Python
bsd-3-clause
8,430
[ "VTK" ]
0411d0ad71f52c2f6a02722287196636eb1bfec9460f5facaec27cf145d64c02
#! /usr/bin/env python # # This file is distributed as part of the Wannier90 code and # under the terms of the GNU General Public License. See the # file `LICENSE' in the root directory of the Wannier90 # distribution, or http://www.gnu.org/copyleft/gpl.txt # # The webpage of the Wannier90 code is www.wannier.org # # The Wannier90 code is hosted on GitHub: # # https://github.com/wannier-developers/wannier90 # # Python2 script to find the indeces of a coarse mesh into a finer mesh # provided they are commensurate. # # Written by Antimo Marrazzo (EPFL) # Last update September 13th, 2016 # import sys import numpy as np def prepare_mesh(coarse_grid,nscf_output_file): import subprocess with open(nscf_output_file, 'r') as f: read_data = f.readlines() f.close() read_kpts=False k_fine_list=[] for line in read_data: if "number of k points=" in line: numk_line=line.strip('\n').split() num_kpoints=int(numk_line[4]) print 'Number of kpoints provided to Yambo through a NSCF calculation', num_kpoints if read_kpts==True: kline=line.strip('\n').split() if 'wk' in kline: a=kline[4:6] b=kline[6].split(')')[0] k_vec=[float(a[0]),float(a[1]),float(b)] k_fine_list.append(k_vec) else: read_kpts=False if "cryst. coord." in line and 'site' not in line: read_kpts=True coarse_text=[str(i)+' ' for i in coarse_grid] k_coarse_mesh=subprocess.check_output(['./kmesh.pl', coarse_text[0],coarse_text[1],coarse_text[2], 'wan']) k_coarse_mesh=k_coarse_mesh.split('\n') k_coarse_list=[] for i in range(coarse_grid[0]*coarse_grid[1]*coarse_grid[2]): line=k_coarse_mesh[i].split() k_coarse_list.append([float(j) for j in line]) return (k_fine_list,k_coarse_list) def indeces_list(fine_mesh,coarse_mesh): import numpy as np opt=np.array([0,1,-1]) k_list=[] for i in coarse_mesh: count=1 for j in fine_mesh: q=i-j q=np.around(q,decimals=5) if (q[0] in opt and q[1] in opt and q[2] in opt): k_list.append(count) count=count+1 return k_list print '####################' print '####Mesh mapper#####' print '####################' coarse_grid = sys.argv[1:4] coarse_grid = [int(i) for i in coarse_grid] print 'Input coarse mesh:', coarse_grid nscf_output_file = sys.argv[4] print 'Path of the QE NSCF output file', nscf_output_file (k_fine_list,k_coarse_list)=prepare_mesh(coarse_grid,nscf_output_file) ind_list=indeces_list(np.array(k_fine_list),np.array(k_coarse_list)) print 'List of k-indeces to pass to Yambo', ind_list for i in ind_list: print str(i)+'|'+str(i)+'|'+'first band'+'|'+'last band'+'|'
greschd/wannier90
utility/k_mapper.py
Python
gpl-2.0
2,939
[ "Wannier90", "Yambo" ]
d1593ce853ba24cb07f94dbaab4e6c3abc7802990aa2de55cb9e42e5c39822af
import json import logging import random import traceback import time import urllib import urllib2 from flask import Flask from flask import request from google.appengine.api import memcache import api from api import PickledThing app = Flask(__name__) app.config['DEBUG'] = True # Note: We don't need to call run() since our application is embedded within # the App Engine WSGI application server. logger = logging.getLogger(__name__) # doesn't work, need to set --log_level=debug in dev server # e.g., in: Edit > Application Settings # logger.setLevel(logging.DEBUG) sakurafish_url = \ "https://data.archive.moe/board/a/image/1434/39/1434397984867.jpg" WEBHOOK = True LAST_4CHAN_API_CALL_TIME = 0 # global a_images_key = 'a:images' # Bot modes settings ECHO = False # echo everything, global @app.route('/') def index(): """Return a friendly HTTP greeting.""" return 'Hello World' @app.errorhandler(404) def page_not_found(e): """Return a custom 404 error.""" return 'Sorry, nothing at this URL.', 404 @app.route('/test', methods=['GET', 'POST']) def test(): """Test""" logger.info("Got these args %s", request.args) # add means only add when key doesn't exist #memcache.add(key='key', value=str(request.args), time=3600) pt = PickledThing.get_by_id(a_images_key) if not pt: logger.error("No entry in Datastore found for key: %s", a_images_key) return "No images" imgs = pt.thing return str(imgs) # return str(memcache.get('a:images')) @app.route('/tasks/update_cache', methods=['GET']) def update_cache(): """Periodic cache update.""" timeout = 60 # avoid GAE request limits (DeadlineExceededError) # NOTE: actually cron jobs have a 10 min limit, but there # is a possibility that if you hit the task url directly it # will think it's not a cron job and impose the regular # limit? t0 = time.time() # get from https://a.4cdn.org/{board}/threads.json board = 'a' response = urllib2.urlopen(api.threads_url(board), timeout=timeout) threads = json.load(response) # response is file-like time.sleep(2) # a nice functional approach (may be faster) all_threads = reduce(lambda x, y: x + y['threads'], threads, []) # all thread numbers, can be inserted into: # http://a.4cdn.org/{board}/thread/{thread_no}.json all_thread_nos = [t['no'] for t in all_threads] all_img_filenames = [] for no in all_thread_nos: try: response = urllib2.urlopen(api.posts_url(board, no), timeout=timeout) except urllib2.HTTPError: logger.warning("Could not open: %s", api.posts_url(board, no)) logger.warning(traceback.format_exc()) continue except Exception: logger.error( "Exception when opening: %s", api.posts_url(board, no)) logger.error(traceback.format_exc()) continue posts = json.load(response)['posts'] # a list of post objects # Note: not all posts contain images img_filenames = [str(p['tim']) + p['ext'] for p in posts if p.get('tim') is not None] all_img_filenames.extend(img_filenames) time.sleep(2) logger.info("Update finished. 
%s threads, %s img filenames", len(all_thread_nos), len(all_img_filenames)) logger.info("Total time %s", time.time() - t0) # clear all old stuff old_pt = PickledThing.get_by_id(a_images_key) if old_pt: old_pt.key.delete() pt = api.PickledThing(name=a_images_key, thing=all_img_filenames, id=a_images_key) pt_key = pt.put() logger.info("Saved in Datastore under key: %s", pt_key) # memcache.set(key='a:images', value=all_img_filenames) return "a okay" @app.route('/listen', methods=['POST']) def listen(): """Listening to incoming messages from webhook""" logger.info(request.get_json()) if WEBHOOK: update_id, msg = api.get_update(request.get_json()) else: # TODO: updates via long polling; this doesn't work yet (use threads?) # updates = api.get_updates(request.args) # a list of updates # update_id, msg = api.get_latest_update(updates) pass if msg: chat_id = msg['chat']['id'] text = msg.get('text') # TODO: is optional!! has_cmd = api.is_command(msg.get('text', '')) if has_cmd: logger.debug(has_cmd) cmd = has_cmd[0] if cmd == 'echo': try: global ECHO if has_cmd[1] == 'on': ECHO = True api.send_message(chat_id, "Echo turned on") elif has_cmd[1] == 'off': ECHO = False api.send_message(chat_id, "Echo turned off") except Exception: logger.warning("/echo has no second arg") # just catch the exceptions for now... logger.warning(traceback.format_exc()) elif cmd == '4chan': global LAST_4CHAN_API_CALL_TIME now = time.time() pt = PickledThing.get_by_id(a_images_key) if not pt: logger.error("No entry found in Datastore for key: %s", a_images_key) return "No images" imgs = pt.thing # imgs = memcache.get('a:images') #if not imgs: # logger.error("Cache empty for a:images") # return "No images" if now - LAST_4CHAN_API_CALL_TIME < 1: api.send_message(chat_id, "pls be gentle on api") else: # 10 retries for i in range(10): logger.info("Getting random img, try number: %s", i) board = 'a' img_name = random.choice(imgs) img_url = api.img_url(board, img_name) resp = urllib.urlopen(img_url) if resp.getcode() == 200: api.send_message(chat_id, img_url) break else: logger.info("404 url: %s", img_url) # TODO: remove from cache time.sleep(1) LAST_4CHAN_API_CALL_TIME = now elif cmd == 'sakurafish': api.send_message(chat_id, sakurafish_url) elif cmd == 'help': api.send_message( chat_id, "Commands:\n/4chan: random 4chan image (only /a/" " for now)\n/sakurafish: sakurafish") # do not echo commands if ECHO and text and not has_cmd: resp = api.send_message(chat_id, text) logger.debug("Echo result: %s", resp) return "A response"
jackieleng/vierchanbot
main.py
Python
mit
7,203
[ "MOE" ]
62d161adaa6e38e7aa15c61cc268a6dfa171738a8f799f54c07d3d7b270121f7
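The bot's main.py above relies on an api module that is not shown; the comments in update_cache() pin down the 4chan endpoints it wraps (https://a.4cdn.org/{board}/threads.json and the per-thread posts URL), and the call sites fix the signatures of threads_url, posts_url and img_url. Below is a hedged sketch of what those three helpers could look like, based only on the URL patterns quoted in the comments; the image CDN host and the exact return values are assumptions, not taken from the original code.

# Hedged sketch of the URL helpers main.py imports from its (unshown) api module.
def threads_url(board):
    # Catalog of all threads on a board, per the comment in update_cache().
    return "https://a.4cdn.org/{}/threads.json".format(board)

def posts_url(board, thread_no):
    # All posts in one thread, following the pattern quoted in the comments.
    return "https://a.4cdn.org/{}/thread/{}.json".format(board, thread_no)

def img_url(board, img_filename):
    # Full-size image URL built from a post's tim+ext filename.
    # The i.4cdn.org host is an assumption, not confirmed by the original code.
    return "https://i.4cdn.org/{}/{}".format(board, img_filename)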